diff --git a/.JuliaFormatter.toml b/.JuliaFormatter.toml
index 6bd59fd..323237b 100644
--- a/.JuliaFormatter.toml
+++ b/.JuliaFormatter.toml
@@ -1,3 +1 @@
-style = "sciml"
-format_docstrings = true
-margin = 80
+style = "blue"
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..10e19a4
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,15 @@
+JULIA=$(shell which julia)
+TEST_PROCEDURE="import Pkg;Pkg.test()"
+
+format:
+	$(JULIA) format_project.jl
+
+test%:
+	$(JULIA) $* --project -e $(TEST_PROCEDURE) > test-$*.log 2>&1
+
+testall: test+lts test+beta test+release
+
+clean:
+	rm test*.log
+
+.PHONY: format test testall clean
diff --git a/Project.toml b/Project.toml
index 0ec622c..f60090e 100644
--- a/Project.toml
+++ b/Project.toml
@@ -7,21 +7,31 @@ version = "0.3.0"
 AbstractTrees = "1520ce14-60c1-5f80-bbc7-55ef81b5835c"
 Dates = "ade2ca70-3891-5945-98fb-dc099432e06a"
 HypertextLiteral = "ac1192a8-f4b3-4bfe-ba22-af5b92cd3ab2"
-OrderedCollections = "bac558e1-5e72-5ebc-8fee-abe8a469f55d"
 Pkg = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f"
 SnoopPrecompile = "66db9d55-30c0-4569-8b51-7e840670fc0c"
 TimeZones = "f269a46b-ccf7-5d73-abea-4c690281aa53"
 
+[weakdeps]
+TimeZones = "f269a46b-ccf7-5d73-abea-4c690281aa53"
+
+[extensions]
+TimeZonesExt = "TimeZones"
+
 [compat]
 AbstractTrees = "0.4"
-OrderedCollections = "1"
-SnoopPrecompile = "1"
 HypertextLiteral = "0.9"
+SnoopPrecompile = "1"
 TimeZones = "1"
 julia = "1"
 
 [extras]
+AbstractTrees = "1520ce14-60c1-5f80-bbc7-55ef81b5835c"
+Aqua = "4c88cf16-eb10-579e-8560-4a9242c79595"
+Gumbo = "708ec375-b3d6-5a57-a7ce-8257bf98657a"
+JET = "c3a54625-cd67-489e-a8e7-0a5a0ff4e31b"
+JSON = "682c06a0-de6a-54ab-a142-c8b1cf79cde6"
 Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
+pandoc_jll = "c5432543-76ad-5c9d-82bf-db097047a5e2"
 
 [targets]
-test = ["Test"]
+test = ["AbstractTrees", "Aqua", "Gumbo", "JET", "JSON", "Test", "pandoc_jll"]
diff --git a/README.md b/README.md
index b6547fb..72af198 100644
--- a/README.md
+++ b/README.md
@@ -173,3 +173,7 @@ There are three main steps for turning Norg files into HTML (since it's the only
 3. Code generation (turning the AST into HTML)
 
 Earlier Norg.jl would rely on Julia's type system, but that made the code type-unstable. That's why I refactored it using a kind of enumeration to label each token and node of the AST. I did not invent anything here, it comes straight from [JuliaSyntax.jl](https://github.com/JuliaLang/JuliaSyntax.jl/) super cool ideas.
+
+## Projects using Norg.jl
+
+- [neorg-dashboard](https://github.com/isentropic/neorg-dashboard), a simple file watcher and renderer of Norg in Julia.
diff --git a/docs/make.jl b/docs/make.jl
index 9a914f1..290a7a4 100644
--- a/docs/make.jl
+++ b/docs/make.jl
@@ -10,18 +10,19 @@ end;
 md_path = joinpath(@__DIR__, "src", "1.0-specification.md")
 ast = norg(s)
 function mk_toc(ast)
-    toc_tree = filter(!isnothing, [mk_toc(ast, c) for c in children(ast.root)])
+    return toc_tree = filter(!isnothing, [mk_toc(ast, c) for c in children(ast.root)])
 end
 function mk_toc(ast, node)
     c = children(node)
     if !Norg.AST.is_heading(node)
         nothing
-    else
-        h, node_children... = c
+    else
+        h, node_children... = c
         level = Norg.AST.heading_level(node)
-        (title=Norg.Codegen.textify(ast, h),
-            level = level,
-            children=filter([mk_toc(ast, c) for c in node_children]) do c
+        (
+            title=Norg.Codegen.textify(ast, h),
+            level=level,
+            children=filter([mk_toc(ast, c) for c in node_children]) do c
                 if isnothing(c)
                     false
                 elseif c.level >= 3
@@ -31,16 +32,14 @@ function mk_toc(ast, node)
                 else
                     true
                 end
-            end
+            end,
     )
 end
 end
 toc = mk_toc(ast)
 function mk_html_toc(toc_elem)
-    href = "#"*"h$(toc_elem.level)-"*Norg.Codegen.idify(toc_elem.title)
-    lis = [
-        @htl("
does not have an 'item' notion, so we have to short-circuit # that. - res = [] + res = String[] for c in children(node) - append!(res, filter(!isempty, codegen.(Ref(t), Ref(ast), children(c)))|>collect) + append!(res, collect(filter(!isempty, codegen.(Ref(t), Ref(ast), children(c))))) end - OrderedDict([ - "t"=>"BlockQuote" - "c"=>res + return jsonify([ + :t => jsonify(:BlockQuote) + :c => jsonify(res) ]) end @@ -412,26 +521,41 @@ function codegen(t::JSONTarget, ::StandardRangedTag, ast::NorgDocument, node::No tag, others... = children(node) tag_litteral = litteral(ast, tag) if tag_litteral == "comment" - OrderedDict() + "" elseif tag_litteral == "example" - OrderedDict([ - "t"=>"CodeBlock" - "c"=>[["", ["norg"], []], textify(ast, last(others))] - ]) + jsonify( + [ + :t => jsonify(:CodeBlock) + :c => jsonify([ + jsonify([Symbol(""), jsonify(["\"norg\""]), jsonify([])]), + "\"" * textify(ast, last(others), escape_string) * "\"", + ]) + ], + ) elseif tag_litteral == "details" # TODO - OrderedDict() + "" elseif tag_litteral == "group" - OrderedDict([ - "t"=>"Div", - "c"=>[["", [], []], codegen_children(t, ast, last(others))] + jsonify([ + :t => jsonify(:Div), + :c => jsonify([ + jsonify([Symbol(""), jsonify([]), jsonify([])]), + jsonify(codegen_children(t, ast, last(others))), + ]), ]) else - @warn "Unknown standard ranged tag." tag_litteral ast.tokens[AST.start(node)] ast.tokens[AST.stop(node)] - OrderedDict([ - "t"=>"Div", - "c"=>[["", [], []], codegen_children(t, ast, last(others))] - ]) + @warn "Unknown standard ranged tag." tag_litteral ast.tokens[AST.start(node)] ast.tokens[AST.stop( + node + )] + jsonify( + [ + :t => jsonify(:Div) + :c => jsonify([ + jsonify([Symbol(""), jsonify([]), jsonify([])]), + jsonify(codegen_children(t, ast, last(others))), + ]) + ], + ) end end @@ -439,23 +563,33 @@ function codegen(::JSONTarget, ::Verbatim, ast::NorgDocument, node::Node) # cowardly ignore any verbatim that is not code tag, others... 
= children(node) if litteral(ast, tag) != "code" - return OrderedDict() + return "" end if length(others) == 1 - OrderedDict([ - "t"=>"CodeBlock" - "c"=>[["", [], []], textify(ast, last(others))] - ]) + jsonify( + [ + :t => jsonify(:CodeBlock) + :c => jsonify([ + jsonify([Symbol(""), jsonify([]), jsonify([])]), + "\"" * textify(ast, last(others), escape_string) * "\"", + ]) + ], + ) else language = if kind(first(others)) == K"TagParameter" litteral(ast, first(others)) else litteral(ast, others[2]) end - OrderedDict([ - "t"=>"CodeBlock" - "c"=>[["", [language], []], textify(ast, last(others))] - ]) + jsonify( + [ + :t => jsonify(:CodeBlock) + :c => jsonify([ + jsonify([Symbol(""), jsonify(["\"" * language * "\""]), jsonify([])]), + "\"" * textify(ast, last(others), escape_string) * "\"", + ]) + ], + ) end end @@ -467,74 +601,90 @@ function codegen(::JSONTarget, ::TodoExtension, ast::NorgDocument, node::Node) else s = "☐" end - OrderedDict([ - "t"=>"Plain" - "c"=>[OrderedDict([ - "t"=>"Str" - "c"=>s - ])] - ]) + return jsonify( + [ + :t => jsonify(:Plain) + :c => jsonify([jsonify([ + :t => jsonify(:Str) + :c => s + ])]) + ] + ) end -function codegen(t::JSONTarget, c::Union{WeakCarryoverTag, StrongCarryoverTag}, ast::NorgDocument, node::Node) +function codegen( + t::JSONTarget, + c::Union{WeakCarryoverTag,StrongCarryoverTag}, + ast::NorgDocument, + node::Node, +) content = codegen(t, ast, last(children(node))) - label = textify(ast, first(children(node))) + label = "\"" * textify(ast, first(children(node)), escape_string) * "\"" # TODO: there's most likely some room for improvement here, as some contents # already have a mechanism for attributes, so the Div is not needed. - attr = ["", [], []] + attr = [Symbol(""), jsonify([]), jsonify([])] if length(children(node)) <= 2 - attr[2] = [label] + attr[2] = jsonify([label]) elseif length(children(node)) == 3 - attr[3] = [[label, textify(ast, children(node)[2])]] + attr[3] = jsonify([ + jsonify([label, "\"" * textify(ast, children(node)[2], escape_string) * "\""]) + ]) else - attr[2] = [join(textify.(Ref(ast), children(node)[1:end-1]), "-")] + attr[2] = jsonify([ + "\"" * + join(textify.(Ref(ast), children(node)[1:(end - 1)], escape_string), "-") * + "\"", + ]) end - t = if kind(node) == K"WeakCarryoverTag" && kind(last(children(node)))==K"ParagraphSegment" - "Span" - else - "Div" - end - if !(content isa Vector) - content = [content] + t = + if kind(node) == K"WeakCarryoverTag" && + kind(last(children(node))) == K"ParagraphSegment" + :Span + else + :Div + end + if !(first(content) == '[') + content = jsonify([content]) end - OrderedDict([ - "t"=>t, - "c"=>[attr, content] - ]) + return jsonify([:t => jsonify(t), :c => jsonify([jsonify(attr), content])]) end function codegen(t::JSONTarget, ::Definition, ast::NorgDocument, node::Node) items = children(node) - OrderedDict([ - "t"=>"DefinitionList" - "c"=>map(items) do item - term, def... = children(item) - term_id = "def_" * idify(textify(ast, term)) - term_node = OrderedDict([ - "t"=>"Span" - "c"=>[ - (term_id, [], []), - codegen(t, ast, term) - ] - ]) - def_node = codegen.(Ref(t), Ref(ast), def) - ([term_node], [def_node]) - end - ]) -end - -function codegen(t::JSONTarget, ::Footnote, ast::NorgDocument, node::Node) + return jsonify( + [ + :t => jsonify(:DefinitionList) + :c => jsonify(map(items) do item + term, def... 
= children(item) + term_id = "def_" * idify(textify(ast, term, escape_string)) + term_node = jsonify( + [ + :t => jsonify(:Span) + :c => jsonify([ + jsonify(["\"" * term_id * "\"", jsonify([]), jsonify([])]), + jsonify([codegen(t, ast, term)]), + ]) + ], + ) + def_node = jsonify(codegen.(Ref(t), Ref(ast), def)) + jsonify([jsonify([term_node]), jsonify([def_node])]) + end) + ], + ) +end + +function codegen(::JSONTarget, ::Footnote, ast::NorgDocument, node::Node) # Return nothing, pandoc expects footnotes to be defined where they are called. - [] + return "" end function codegen(t::JSONTarget, ::Slide, ast::NorgDocument, node::Node) - codegen(t, ast, first(children(node))) + return codegen(t, ast, first(children(node))) end function codegen(t::JSONTarget, ::IndentSegment, ast::NorgDocument, node::Node) - codegen_children(t, ast, node) + return codegen_children(t, ast, node) end export JSONTarget diff --git a/src/kind.jl b/src/kind.jl index 368f9f2..cac558d 100644 --- a/src/kind.jl +++ b/src/kind.jl @@ -12,14 +12,14 @@ module Kinds All the defined kind names. """ const _kind_names = [ - "None" - "StartOfFile" - "EndOfFile" - "BEGIN_WHITESPACE" + "None" + "StartOfFile" + "EndOfFile" + "BEGIN_WHITESPACE" "LineEnding" "Whitespace" - "END_WHITESPACE" - "BEGIN_PUNCTUATION" + "END_WHITESPACE" + "BEGIN_PUNCTUATION" "Punctuation" "\\" "*" @@ -50,37 +50,37 @@ const _kind_names = [ ")" "|" "+" - "END_PUNCTUATION" - "x" - "Word" + "END_PUNCTUATION" + "x" + "Word" - # AST stuff - "NorgDocument" - "BEGIN_AST_NODE" + # AST stuff + "NorgDocument" + "BEGIN_AST_NODE" # Leafs contain a set of tokens. "BEGIN_AST_LEAF" - "WordNode" - "Number" - "TagName" - "TagParameter" - "VerbatimBody" - "HeadingPreamble" - "NestablePreamble" - "LineNumberTarget" - "URLTarget" - "FileTarget" - "FileNorgRootTarget" - "Timestamp" - "BEGIN_TODO_STATUS" - "StatusUndone" - "StatusDone" - "StatusNeedFurtherInput" - "StatusUrgent" - "StatusRecurring" - "StatusInProgress" - "StatusOnHold" - "StatusCancelled" - "END_TODO_STATUS" + "WordNode" + "Number" + "TagName" + "TagParameter" + "VerbatimBody" + "HeadingPreamble" + "NestablePreamble" + "LineNumberTarget" + "URLTarget" + "FileTarget" + "FileNorgRootTarget" + "Timestamp" + "BEGIN_TODO_STATUS" + "StatusUndone" + "StatusDone" + "StatusNeedFurtherInput" + "StatusUrgent" + "StatusRecurring" + "StatusInProgress" + "StatusOnHold" + "StatusCancelled" + "END_TODO_STATUS" "END_AST_LEAF" "Paragraph" "ParagraphSegment" @@ -92,118 +92,117 @@ const _kind_names = [ "RangeableItem" "StandardRangedTagBody" "BEGIN_TAG" - "BEGIN_RANGED_TAG" - "Verbatim" - "END_RANGED_TAG" - "BEGIN_CARRYOVER_TAG" - "WeakCarryoverTag" - "StrongCarryoverTag" - "END_CARRYOVER_TAG" - "StandardRangedTag" + "BEGIN_RANGED_TAG" + "Verbatim" + "END_RANGED_TAG" + "BEGIN_CARRYOVER_TAG" + "WeakCarryoverTag" + "StrongCarryoverTag" + "END_CARRYOVER_TAG" + "StandardRangedTag" "END_TAG" "HeadingTitle" "BEGIN_MATCHED_INLINE" - "BEGIN_ATTACHED_MODIFIER" - "Bold" - "Italic" - "Underline" - "Strikethrough" - "Spoiler" - "Superscript" - "Subscript" - "InlineCode" - "NullModifier" - "InlineMath" - "Variable" - "BEGIN_FREE_FORM_ATTACHED_MODIFIER" - "FreeFormBold" - "FreeFormItalic" - "FreeFormUnderline" - "FreeFormStrikethrough" - "FreeFormSpoiler" - "FreeFormSuperscript" - "FreeFormSubscript" - "FreeFormInlineCode" - "FreeFormNullModifier" - "FreeFormInlineMath" - "FreeFormVariable" - "END_FREE_FORM_ATTACHED_MODIFIER" - "END_ATTACHED_MODIFIER" - "BEGIN_LINK_LOCATION" - "URLLocation" - "LineNumberLocation" - "DetachedModifierLocation" - 
"MagicLocation" - "FileLocation" - "NorgFileLocation" - "WikiLocation" - "TimestampLocation" - "END_LINK_LOCATION" - "LinkDescription" - "LinkLocation" - "InlineLinkTarget" + "BEGIN_ATTACHED_MODIFIER" + "Bold" + "Italic" + "Underline" + "Strikethrough" + "Spoiler" + "Superscript" + "Subscript" + "InlineCode" + "NullModifier" + "InlineMath" + "Variable" + "BEGIN_FREE_FORM_ATTACHED_MODIFIER" + "FreeFormBold" + "FreeFormItalic" + "FreeFormUnderline" + "FreeFormStrikethrough" + "FreeFormSpoiler" + "FreeFormSuperscript" + "FreeFormSubscript" + "FreeFormInlineCode" + "FreeFormNullModifier" + "FreeFormInlineMath" + "FreeFormVariable" + "END_FREE_FORM_ATTACHED_MODIFIER" + "END_ATTACHED_MODIFIER" + "BEGIN_LINK_LOCATION" + "URLLocation" + "LineNumberLocation" + "DetachedModifierLocation" + "MagicLocation" + "FileLocation" + "NorgFileLocation" + "WikiLocation" + "TimestampLocation" + "END_LINK_LOCATION" + "LinkDescription" + "LinkLocation" + "InlineLinkTarget" "END_MATCHED_INLINE" "BEGIN_DETACHED_MODIFIER" - "BEGIN_HEADING" - "Heading1" - "Heading2" - "Heading3" - "Heading4" - "Heading5" - "Heading6" - "END_HEADING" - "BEGIN_DELIMITING_MODIFIER" - "WeakDelimitingModifier" - "StrongDelimitingModifier" - "HorizontalRule" - "END_DELIMITING_MODIFIER" - "BEGIN_NESTABLE" - "BEGIN_UNORDERED_LIST" - "UnorderedList1" - "UnorderedList2" - "UnorderedList3" - "UnorderedList4" - "UnorderedList5" - "UnorderedList6" - "END_UNORDERED_LIST" - "BEGIN_ORDERED_LIST" - "OrderedList1" - "OrderedList2" - "OrderedList3" - "OrderedList4" - "OrderedList5" - "OrderedList6" - "END_ORDERED_LIST" - "BEGIN_QUOTE" - "Quote1" - "Quote2" - "Quote3" - "Quote4" - "Quote5" - "Quote6" - "END_QUOTE" - "END_NESTABLE" - "BEGIN_DETACHED_MODIFIER_EXTENSIONS" - "TodoExtension" - "TimestampExtension" - "PriorityExtension" - "DueDateExtension" - "StartDateExtension" - "END_DETACHED_MODIFIER_EXTENSIONS" - "BEGIN_RANGEABLE_DETACHED_MODIFIERS" - "Definition" - "TableCell" - "Footnote" - "END_RANGEABLE_DETACHED_MODIFIERS" - "BEGIN_DETACHED_MODIFIER_SUFFIX" - "Slide" - "IndentSegment" - "END_DETACHED_MODIFIER_SUFFIX" + "BEGIN_HEADING" + "Heading1" + "Heading2" + "Heading3" + "Heading4" + "Heading5" + "Heading6" + "END_HEADING" + "BEGIN_DELIMITING_MODIFIER" + "WeakDelimitingModifier" + "StrongDelimitingModifier" + "HorizontalRule" + "END_DELIMITING_MODIFIER" + "BEGIN_NESTABLE" + "BEGIN_UNORDERED_LIST" + "UnorderedList1" + "UnorderedList2" + "UnorderedList3" + "UnorderedList4" + "UnorderedList5" + "UnorderedList6" + "END_UNORDERED_LIST" + "BEGIN_ORDERED_LIST" + "OrderedList1" + "OrderedList2" + "OrderedList3" + "OrderedList4" + "OrderedList5" + "OrderedList6" + "END_ORDERED_LIST" + "BEGIN_QUOTE" + "Quote1" + "Quote2" + "Quote3" + "Quote4" + "Quote5" + "Quote6" + "END_QUOTE" + "END_NESTABLE" + "BEGIN_DETACHED_MODIFIER_EXTENSIONS" + "TodoExtension" + "TimestampExtension" + "PriorityExtension" + "DueDateExtension" + "StartDateExtension" + "END_DETACHED_MODIFIER_EXTENSIONS" + "BEGIN_RANGEABLE_DETACHED_MODIFIERS" + "Definition" + "TableCell" + "Footnote" + "END_RANGEABLE_DETACHED_MODIFIERS" + "BEGIN_DETACHED_MODIFIER_SUFFIX" + "Slide" + "IndentSegment" + "END_DETACHED_MODIFIER_SUFFIX" "END_DETACHED_MODIFIER" - "END_AST_NODE" + "END_AST_NODE" ] - """ Kind(name) K"name" @@ -212,9 +211,7 @@ This is type tag, used to specify the type of tokens and AST nodes. 
""" primitive type Kind 8 end -let kind_int_type = :UInt8, - max_kind_int = length(_kind_names)-1 - +let kind_int_type = :UInt8, max_kind_int = length(_kind_names) - 1 @eval begin function Kind(x::Integer) if x < 0 || x > $max_kind_int @@ -223,32 +220,44 @@ let kind_int_type = :UInt8, return Base.bitcast(Kind, convert($kind_int_type, x)) end - Base.convert(::Type{String}, k::Kind) = _kind_names[1 + Base.bitcast($kind_int_type, k)] - - let kindstr_to_int = Dict(s=>i-1 for (i,s) in enumerate(_kind_names)) + let kindstr_to_int = Dict(s => i - 1 for (i, s) in enumerate(_kind_names)) function Base.convert(::Type{Kind}, s::AbstractString) i = get(kindstr_to_int, s) do error("unknown Kind name $(repr(s))") end - Kind(i) + return Kind(i) end end - Base.string(x::Kind) = convert(String, x) - Base.print(io::IO, x::Kind) = print(io, convert(String, x)) + Base.string(k::Kind) = _kind_names[1 + Base.bitcast($kind_int_type, k)] + Base.print(io::IO, x::Kind) = print(io, string(x)) Base.typemin(::Type{Kind}) = Kind(0) Base.typemax(::Type{Kind}) = Kind($max_kind_int) - Base.instances(::Type{Kind}) = (Kind(i) for i in reinterpret($kind_int_type, typemin(Kind)):reinterpret($kind_int_type, typemax(Kind))) - Base.:<(x::Kind, y::Kind) = reinterpret($kind_int_type, x) < reinterpret($kind_int_type, y) + function Base.instances(::Type{Kind}) + return ( + Kind(i) for i in + reinterpret($kind_int_type, typemin(Kind)):reinterpret( + $kind_int_type, typemax(Kind) + ) + ) + end + function Base.:<(x::Kind, y::Kind) + return reinterpret($kind_int_type, x) < reinterpret($kind_int_type, y) + end - all_single_punctuation_tokens() = (Kind(i) for i in (reinterpret($kind_int_type, convert(Kind, "Punctuation"))+1):(reinterpret($kind_int_type, convert(Kind, "END_PUNCTUATION"))-1)) + function all_single_punctuation_tokens() + return ( + Kind(i) for i in + (reinterpret($kind_int_type, convert(Kind, "Punctuation")) + 1):(reinterpret($kind_int_type, convert(Kind, "END_PUNCTUATION")) - 1) + ) + end end end function Base.show(io::IO, k::Kind) - print(io, "K\"$(convert(String, k))\"") + return print(io, "K\"$(string(k))\"") end """ @@ -258,7 +267,7 @@ For example * K">" is the kind of the greater than sign token """ macro K_str(s) - convert(Kind, s) + return convert(Kind, s) end """ @@ -288,11 +297,17 @@ kind(::Nothing) = K"None" is_leaf(k::Kind) = K"BEGIN_AST_LEAF" < k < K"END_AST_LEAF" is_matched_inline(k::Kind) = K"BEGIN_MATCHED_INLINE" < k < K"END_MATCHED_INLINE" is_attached_modifier(k::Kind) = K"BEGIN_ATTACHED_MODIFIER" < k < K"END_ATTACHED_MODIFIER" -is_free_form_attached_modifier(k::Kind) = K"BEGIN_FREE_FORM_ATTACHED_MODIFIER" < k < K"END_FREE_FORM_ATTACHED_MODIFIER" +function is_free_form_attached_modifier(k::Kind) + return K"BEGIN_FREE_FORM_ATTACHED_MODIFIER" < k < K"END_FREE_FORM_ATTACHED_MODIFIER" +end is_link_location(k::Kind) = K"BEGIN_LINK_LOCATION" < k < K"END_LINK_LOCATION" is_detached_modifier(k::Kind) = K"BEGIN_DETACHED_MODIFIER" < k < K"END_DETACHED_MODIFIER" -is_detached_modifier_extension(k::Kind) = K"BEGIN_DETACHED_MODIFIER_EXTENSIONS" < k < K"END_DETACHED_MODIFIER_EXTENSIONS" -is_delimiting_modifier(k::Kind) = K"BEGIN_DELIMITING_MODIFIER" < k < K"END_DELIMITING_MODIFIER" +function is_detached_modifier_extension(k::Kind) + return K"BEGIN_DETACHED_MODIFIER_EXTENSIONS" < k < K"END_DETACHED_MODIFIER_EXTENSIONS" +end +function is_delimiting_modifier(k::Kind) + return K"BEGIN_DELIMITING_MODIFIER" < k < K"END_DELIMITING_MODIFIER" +end is_nestable(k::Kind) = K"BEGIN_NESTABLE" < k < K"END_NESTABLE" is_heading(k::Kind) = 
K"BEGIN_HEADING" < k < K"END_HEADING" is_unordered_list(k::Kind) = K"BEGIN_UNORDERED_LIST" < k < K"END_UNORDERED_LIST" diff --git a/src/match/attached_modifiers.jl b/src/match/attached_modifiers.jl index b646d34..2a1a367 100644 --- a/src/match/attached_modifiers.jl +++ b/src/match/attached_modifiers.jl @@ -32,52 +32,61 @@ freeformattachedmodifier(::InlineCode) = K"FreeFormInlineCode" freeformattachedmodifier(::NullModifier) = K"FreeFormNullModifier" freeformattachedmodifier(::InlineMath) = K"FreeFormInlineMath" freeformattachedmodifier(::Variable) = K"FreeFormVariable" +freeformattachedmodifier(t::T) where {T<:FreeFormAttachedModifier} = attachedmodifier(t) function match_norg(t::T, parents, tokens, i) where {T<:AttachedModifierStrategy} if K"LinkLocation" ∈ parents return MatchNotFound() end - @debug "matching attached modifier" next_i = nextind(tokens, i) next_token = tokens[next_i] prev_i = prevind(tokens, i) last_token = tokens[prev_i] # if opening modifier is found - if (is_sof(last_token) || is_punctuation(last_token) || is_whitespace(last_token)) && (!is_eof(next_token) && !is_whitespace(next_token)) + if (is_sof(last_token) || is_punctuation(last_token) || is_whitespace(last_token)) && + (!is_eof(next_token) && !is_whitespace(next_token)) if kind(next_token) == K"|" MatchFound(freeformattachedmodifier(t)) else MatchFound(attachedmodifier(t)) end - # Link modifier + # Link modifier elseif kind(last_token) == K":" && (!is_eof(next_token) && !is_whitespace(next_token)) prev_prev_i = prevind(tokens, prev_i) - if prev_prev_i >= firstindex(tokens) && (is_sof(tokens[prev_prev_i]) || is_punctuation(tokens[prev_prev_i]) || is_whitespace(tokens[prev_prev_i])) + if prev_prev_i >= firstindex(tokens) && ( + is_sof(tokens[prev_prev_i]) || + is_punctuation(tokens[prev_prev_i]) || + is_whitespace(tokens[prev_prev_i]) + ) MatchFound(attachedmodifier(t)) else MatchNotFound() end - # Closing modifier - elseif attachedmodifier(t) ∈ parents && !is_whitespace(last_token) && (is_eof(next_token) || is_whitespace(next_token) || is_punctuation(next_token)) - MatchClosing(attachedmodifier(t), first(parents)==attachedmodifier(t)) + # Closing modifier + elseif attachedmodifier(t) ∈ parents && + !is_whitespace(last_token) && + (is_eof(next_token) || is_whitespace(next_token) || is_punctuation(next_token)) + MatchClosing(attachedmodifier(t), first(parents) == attachedmodifier(t)) else MatchNotFound() end end -function match_norg(t::T, parents, tokens, i) where {T <: Union{VerbatimAttachedModifierStrategy, FreeFormAttachedModifier}} +function match_norg( + t::T, parents, tokens, i +) where {T<:Union{VerbatimAttachedModifierStrategy,FreeFormAttachedModifier}} if K"LinkLocation" ∈ parents return MatchNotFound() end - @debug "you know where" next_i = nextind(tokens, i) next_token = tokens[next_i] prev_i = prevind(tokens, i) last_token = tokens[prev_i] token = tokens[i] # Opening modifier - if attachedmodifier(t) ∉ parents && (is_sof(last_token) || is_punctuation(last_token) || is_whitespace(last_token)) && (!is_eof(next_token) && !is_whitespace(next_token)) - @debug "going to open" last_token token next_token + if attachedmodifier(t) ∉ parents && + (is_sof(last_token) || is_punctuation(last_token) || is_whitespace(last_token)) && + (!is_eof(next_token) && !is_whitespace(next_token)) if kind(next_token) == K"|" # Edge case: we want to be able to write `|` (verbatim attached # modifiers have higher precedence than free-form attached modifiers) @@ -85,7 +94,11 @@ function match_norg(t::T, parents, tokens, i) where {T 
<: Union{VerbatimAttached token = tokens[i] next_i = nextind(tokens, i) next_token = tokens[next_i] - if kind(token) == K"`" && (is_punctuation(next_token) || is_whitespace(next_token) || is_eof(next_token)) + if kind(token) == K"`" && ( + is_punctuation(next_token) || + is_whitespace(next_token) || + is_eof(next_token) + ) MatchFound(attachedmodifier(t)) else MatchFound(freeformattachedmodifier(t)) @@ -95,23 +108,28 @@ function match_norg(t::T, parents, tokens, i) where {T <: Union{VerbatimAttached else MatchFound(attachedmodifier(t)) end - # Closing modifier + # Closing modifier elseif attachedmodifier(t) ∈ parents && t isa FreeFormAttachedModifier - @debug "closing free-form" t - MatchClosing(attachedmodifier(t), first(parents)==attachedmodifier(t)) - elseif attachedmodifier(t) ∈ parents && !is_whitespace(last_token) && (is_eof(next_token) || is_whitespace(next_token) || is_punctuation(next_token)) MatchClosing(attachedmodifier(t), first(parents) == attachedmodifier(t)) - # Link modifier - elseif !(t isa FreeFormAttachedModifier) && kind(last_token) == K":" && (!is_eof(next_token) && !is_whitespace(next_token)) - @debug "link modifier" + elseif attachedmodifier(t) ∈ parents && + !is_whitespace(last_token) && + (is_eof(next_token) || is_whitespace(next_token) || is_punctuation(next_token)) + MatchClosing(attachedmodifier(t), first(parents) == attachedmodifier(t)) + # Link modifier + elseif !(t isa FreeFormAttachedModifier) && + kind(last_token) == K":" && + (!is_eof(next_token) && !is_whitespace(next_token)) prev_prev_i = prevind(tokens, prev_i) - if prev_prev_i >= firstindex(tokens) && (is_sof(tokens[prev_prev_i]) || is_punctuation(tokens[prev_prev_i]) || is_whitespace(tokens[prev_prev_i])) + if prev_prev_i >= firstindex(tokens) && ( + is_sof(tokens[prev_prev_i]) || + is_punctuation(tokens[prev_prev_i]) || + is_whitespace(tokens[prev_prev_i]) + ) MatchFound(attachedmodifier(t)) else MatchNotFound() end else - @debug "nah" MatchNotFound() end end diff --git a/src/match/detached_modifier_suffix.jl b/src/match/detached_modifier_suffix.jl index 9fc2691..2259b49 100644 --- a/src/match/detached_modifier_suffix.jl +++ b/src/match/detached_modifier_suffix.jl @@ -1,14 +1,11 @@ function match_norg(::DetachedModifierSuffix, parents, tokens, i) next_i = nextind(tokens, i) next_token = tokens[next_i] - @debug "detachedmodifier match" parents next_token tokens[next_i + 1] if first(parents) == K"NestableItem" && kind(next_token) == K"LineEnding" MatchFound(K"Slide") elseif first(parents) == K"NestableItem" && kind(next_token) == K":" next_token = tokens[nextind(tokens, next_i)] - @debug "maybe indent segment?" if kind(next_token) == K"LineEnding" - @debug "Indent segment" MatchFound(K"IndentSegment") else MatchNotFound() diff --git a/src/match/detached_modifiers.jl b/src/match/detached_modifiers.jl index fa30aa3..cbd7638 100644 --- a/src/match/detached_modifiers.jl +++ b/src/match/detached_modifiers.jl @@ -14,24 +14,25 @@ function match_norg(::Heading, parents, tokens, i) return MatchClosing(first(nestable_parents), false) end new_i = i - level = 0 + current_level = 0 while new_i < lastindex(tokens) && kind(tokens[new_i]) == K"*" new_i = nextind(tokens, new_i) - level += 1 + current_level += 1 end next_token = tokens[new_i] if kind(next_token) == K"Whitespace" # If we are in a standard ranged tag, the relevant parents are those # within the tag. 
ancestor_headings = filter(is_heading, relevant_parents) - higher_level_ancestor_heading = findfirst(x -> heading_level(x) >= level, ancestor_headings) - @debug "Closing heading ?" relevant_parents higher_level_ancestor_heading + higher_level_ancestor_heading = findfirst( + ≥(current_level) ∘ heading_level, ancestor_headings + ) if !isnothing(higher_level_ancestor_heading) MatchClosing(ancestor_headings[higher_level_ancestor_heading], false) elseif first(relevant_parents) ∈ [K"ParagraphSegment", K"Paragraph"] MatchClosing(first(relevant_parents), false) else - MatchFound(heading_level(level)) + MatchFound(heading_kind(current_level)) end else MatchNotFound() @@ -50,7 +51,7 @@ function match_norg(t::T, parents, tokens, i) where {T<:DelimitingModifier} return MatchNotFound() end token = tokens[i] - if kind(next_token) == kind(token) + if kind(next_token) == kind(token) new_i = nextind(tokens, next_i) new_token = tokens[new_i] is_delimiting = true @@ -63,8 +64,8 @@ function match_norg(t::T, parents, tokens, i) where {T<:DelimitingModifier} new_token = tokens[new_i] end if is_delimiting - @debug "Found a delimiter" delimitingmodifier(t) parents - if first(parents) ∈ KSet"NorgDocument IndentSegment StandardRangedTagBody" || is_heading(first(parents)) + if first(parents) ∈ KSet"NorgDocument IndentSegment StandardRangedTagBody" || + is_heading(first(parents)) MatchFound(delimitingmodifier(t)) else MatchClosing(first(parents), false) @@ -77,46 +78,46 @@ function match_norg(t::T, parents, tokens, i) where {T<:DelimitingModifier} end end -function nestable(::Quote, level) - if level<=1 +function nestable(::Quote, l) + if l <= 1 K"Quote1" - elseif level == 2 + elseif l == 2 K"Quote2" - elseif level == 3 + elseif l == 3 K"Quote3" - elseif level == 4 + elseif l == 4 K"Quote4" - elseif level == 5 + elseif l == 5 K"Quote5" else K"Quote6" end end -function nestable(::UnorderedList, level) - if level<=1 +function nestable(::UnorderedList, l) + if l <= 1 K"UnorderedList1" - elseif level == 2 + elseif l == 2 K"UnorderedList2" - elseif level == 3 + elseif l == 3 K"UnorderedList3" - elseif level == 4 + elseif l == 4 K"UnorderedList4" - elseif level == 5 + elseif l == 5 K"UnorderedList5" else K"UnorderedList6" end end -function nestable(::OrderedList, level) - if level<=1 +function nestable(::OrderedList, l) + if l <= 1 K"OrderedList1" - elseif level == 2 + elseif l == 2 K"OrderedList2" - elseif level == 3 + elseif l == 3 K"OrderedList3" - elseif level == 4 + elseif l == 4 K"OrderedList4" - elseif level == 5 + elseif l == 5 K"OrderedList5" else K"OrderedList6" @@ -124,27 +125,28 @@ function nestable(::OrderedList, level) end function match_norg(t::T, parents, tokens, i) where {T<:Nestable} new_i = i - level = 0 + current_level = 0 token = tokens[i] while new_i < lastindex(tokens) && kind(tokens[new_i]) == kind(token) new_i = nextind(tokens, new_i) - level += 1 + current_level += 1 end next_token = tokens[new_i] if kind(next_token) == K"Whitespace" ancestor_nestable = filter(is_nestable, parents) - higher_level_ancestor_id = findfirst(x->nestable_level(x) > level, ancestor_nestable) + higher_level_ancestor_id = findfirst( + >(current_level) ∘ nestable_level, ancestor_nestable + ) if !isnothing(higher_level_ancestor_id) MatchClosing(ancestor_nestable[higher_level_ancestor_id], false) - elseif first(parents) == nestable(t, level) + elseif first(parents) == nestable(t, current_level) MatchFound(K"NestableItem") - elseif any(nestable_level.(ancestor_nestable) .== level) + elseif 
any(nestable_level.(ancestor_nestable) .== current_level) MatchClosing(first(parents), false) elseif first(parents) ∈ [K"Paragraph", K"ParagraphSegment"] - @debug "Chérie ça va couper." parents tokens[i] MatchClosing(first(parents), false) else - MatchFound(nestable(t, level)) + MatchFound(nestable(t, current_level)) end else MatchNotFound() diff --git a/src/match/links.jl b/src/match/links.jl index 301c6c0..61f4969 100644 --- a/src/match/links.jl +++ b/src/match/links.jl @@ -31,7 +31,7 @@ function match_norg(::LinkDescription, parents, tokens, i) end function match_norg(::LinkSubTarget, parents, tokens, i) - if kind(first(parents)) == K"FileLocation" + if kind(first(parents)) == K"FileLocation" if isnumeric(first(value(tokens[i]))) MatchFound(K"LineNumberLocation") else @@ -61,7 +61,9 @@ function match_norg(::Anchor, parents, tokens, i) end function match_norg(::InlineLinkTarget, parents, tokens, i) - if kind(tokens[i]) == K"<" && kind(tokens[nextind(tokens, i)]) != K"LineEnding" && K"InlineLinkTarget" ∉ parents + if kind(tokens[i]) == K"<" && + kind(tokens[nextind(tokens, i)]) != K"LineEnding" && + K"InlineLinkTarget" ∉ parents MatchFound(K"InlineLinkTarget") else MatchNotFound() diff --git a/src/match/match.jl b/src/match/match.jl index 9d311e4..0932aef 100644 --- a/src/match/match.jl +++ b/src/match/match.jl @@ -14,7 +14,7 @@ be `found`, can be `closing` (*i.e.* closing an attached modifier), `continued` (as in "ignore this token and continue parsing"). Whether the parser should `consume` or not the current token is given by the `consume` field. """ -struct MatchResult +struct MatchResult kind::Kind found::Bool closing::Bool @@ -31,7 +31,7 @@ isclosing(m::MatchResult) = m.closing iscontinue(m::MatchResult) = m.continued isnotfound(m::MatchResult) = !m.found consume(m::MatchResult) = m.consume -matched(m::MatchResult)= m.kind +matched(m::MatchResult) = m.kind function Base.show(io::IO, m::MatchResult) if isclosing(m) @@ -43,7 +43,7 @@ function Base.show(io::IO, m::MatchResult) else print(io, "MatchFound(") end - print(io, "kind=$(matched(m)), consume=$(consume(m)))") + return print(io, "kind=$(matched(m)), consume=$(consume(m)))") end """ @@ -90,7 +90,6 @@ function force_word_context(parents, tokens, i) end function match_norg(parents, tokens, i) - @debug "Matching..." 
tokens[i] parents token = tokens[i] m = if force_word_context(parents, tokens, i) match_norg(Word(), parents, tokens, i) @@ -100,7 +99,7 @@ function match_norg(parents, tokens, i) match_norg(LineEnding(), parents, tokens, i) elseif kind(token) == K"*" match_norg(Star(), parents, tokens, i) - elseif kind(token) == K"/" + elseif kind(token) == K"/" match_norg(Slash(), parents, tokens, i) elseif kind(token) == K"_" match_norg(Underscore(), parents, tokens, i) @@ -119,25 +118,25 @@ function match_norg(parents, tokens, i) elseif kind(token) == K"&" match_norg(Ampersand(), parents, tokens, i) elseif kind(token) == K"\\" - match_norg(BackSlash(), parents, tokens, i) + match_norg(BackSlash(), parents, tokens, i) elseif kind(token) == K"=" - match_norg(EqualSign(), parents, tokens, i) + match_norg(EqualSign(), parents, tokens, i) elseif kind(token) == K"{" - match_norg(LeftBrace(), parents, tokens, i) + match_norg(LeftBrace(), parents, tokens, i) elseif kind(token) == K"}" - match_norg(RightBrace(), parents, tokens, i) + match_norg(RightBrace(), parents, tokens, i) elseif kind(token) == K"]" - match_norg(RightSquareBracket(), parents, tokens, i) + match_norg(RightSquareBracket(), parents, tokens, i) elseif kind(token) == K"[" - match_norg(LeftSquareBracket(), parents, tokens, i) + match_norg(LeftSquareBracket(), parents, tokens, i) elseif kind(token) == K"~" - match_norg(Tilde(), parents, tokens, i) + match_norg(Tilde(), parents, tokens, i) elseif kind(token) == K">" - match_norg(GreaterThanSign(), parents, tokens, i) + match_norg(GreaterThanSign(), parents, tokens, i) elseif kind(token) == K"<" - match_norg(LesserThanSign(), parents, tokens, i) + match_norg(LesserThanSign(), parents, tokens, i) elseif kind(token) == K"@" - match_norg(CommercialAtSign(), parents, tokens, i) + match_norg(CommercialAtSign(), parents, tokens, i) elseif kind(token) == K"(" match_norg(LeftParenthesis(), parents, tokens, i) elseif kind(token) == K")" @@ -160,10 +159,10 @@ function match_norg(parents, tokens, i) if isnotfound(m) m = match_norg(Word(), parents, tokens, i) end - m + return m end -function match_norg(::Word, parents, tokens, i) +function match_norg(::Word, parents, tokens, i) if is_nestable(first(parents)) MatchClosing(first(parents), false) else @@ -201,7 +200,6 @@ function match_norg(::Whitespace, parents, tokens, i) elseif kind(next_token) == K"$" match_norg(Definition(), parents, tokens, nextind(tokens, i)) elseif kind(next_token) == K"^" - @debug "haha footnote" match_norg(Footnote(), parents, tokens, nextind(tokens, i)) else MatchNotFound() @@ -213,10 +211,9 @@ end function match_norg(::LineEnding, parents, tokens, i) prev_token = tokens[prevind(tokens, i)] - if first(parents) == K"NorgDocument" + if first(parents) == K"NorgDocument" MatchContinue() elseif is_line_ending(prev_token) - @debug "lineEnding" tokens[i] prev_token parents nestable_parents = filter(is_nestable, parents[2:end]) attached_parents = filter(is_attached_modifier, parents) if first(parents) ∈ KSet"IndentSegment StandardRangedTagBody" @@ -247,7 +244,7 @@ function match_norg(::LineEnding, parents, tokens, i) MatchContinue() end elseif K"ParagraphSegment" ∈ parents - MatchClosing(K"ParagraphSegment", first(parents)==K"ParagraphSegment") + MatchClosing(K"ParagraphSegment", first(parents) == K"ParagraphSegment") elseif K"StandardRangedTagBody" ∈ parents i = nextind(tokens, i) m = match_norg(StandardRangedTag(), parents, tokens, nextind(tokens, i)) @@ -272,7 +269,7 @@ function match_norg(::Star, parents, tokens, i) if isnotfound(m) m = 
match_norg(Bold(), parents, tokens, i) end - m + return m end match_norg(::Slash, parents, tokens, i) = match_norg(Italic(), parents, tokens, i) @@ -294,31 +291,28 @@ end function match_norg(::Minus, parents, tokens, i) prev_token = tokens[prevind(tokens, i)] if is_sof(prev_token) || is_line_ending(prev_token) - possible_node = [ - WeakDelimiter(), - UnorderedList(), - Strikethrough(), - ] - m = MatchNotFound() - for node in possible_node - m = match_norg(node, parents, tokens, i) - if !isnotfound(m) - break - end + m = match_norg(WeakDelimiter(), parents, tokens, i) + if isfound(m) + return m end - m + m = match_norg(UnorderedList(), parents, tokens, i) + if isfound(m) + return m + end + match_norg(Strikethrough(), parents, tokens, i) else match_norg(Strikethrough(), parents, tokens, i) end end -match_norg(::ExclamationMark, parents, tokens, i) = match_norg(Spoiler(), parents, tokens, i) +function match_norg(::ExclamationMark, parents, tokens, i) + return match_norg(Spoiler(), parents, tokens, i) +end function match_norg(::Circumflex, parents, tokens, i) - @debug "bonjour c'est circumflex" tokens[i] prev_token = tokens[prevind(tokens, i)] m = if is_line_ending(prev_token) || is_sof(prev_token) - match_norg(Footnote(), parents, tokens, i) + match_norg(Footnote(), parents, tokens, i) else MatchNotFound() end @@ -331,9 +325,13 @@ end match_norg(::Comma, parents, tokens, i) = match_norg(Subscript(), parents, tokens, i) -match_norg(::BackApostrophe, parents, tokens, i) = match_norg(InlineCode(), parents, tokens, i) +function match_norg(::BackApostrophe, parents, tokens, i) + return match_norg(InlineCode(), parents, tokens, i) +end -match_norg(::PercentSign, parents, tokens, i) = match_norg(NullModifier(), parents, tokens, i) +function match_norg(::PercentSign, parents, tokens, i) + return match_norg(NullModifier(), parents, tokens, i) +end match_norg(::Ampersand, parents, tokens, i) = match_norg(Variable(), parents, tokens, i) @@ -346,14 +344,13 @@ function match_norg(::Colon, parents, tokens, i) next_token = tokens[next_i] prev_i = prevind(tokens, i) prev_token = tokens[prev_i] - @debug "hey there" kind(prev_token)∈ATTACHED_DELIMITERS prev_token - if kind(next_token) ∈ ATTACHED_DELIMITERS + if kind(next_token) ∈ ATTACHED_DELIMITERS m = match_norg(parents, tokens, next_i) if isfound(m) && AST.is_attached_modifier(kind(matched(m))) return MatchContinue() end end - MatchNotFound() + MatchNotFound() else m end @@ -404,7 +401,7 @@ function match_norg(::LeftSquareBracket, parents, tokens, i) end prev_i = prevind(tokens, i) last_token = tokens[prev_i] - next_i = nextind(tokens,i) + next_i = nextind(tokens, i) next_token = tokens[next_i] if kind(last_token) == K"}" && kind(next_token) != K"LineEnding" MatchFound(K"LinkDescription") @@ -433,17 +430,17 @@ function match_norg(::GreaterThanSign, parents, tokens, i) end end -match_norg(::LesserThanSign, parents, tokens, i) = match_norg(InlineLinkTarget(), parents, tokens, i) +function match_norg(::LesserThanSign, parents, tokens, i) + return match_norg(InlineLinkTarget(), parents, tokens, i) +end tag_to_strategy(::CommercialAtSign) = Verbatim() tag_to_strategy(::Plus) = WeakCarryoverTag() tag_to_strategy(::NumberSign) = StrongCarryoverTag() -function match_norg(t::Union{CommercialAtSign, Plus, NumberSign}, parents, tokens, i) +function match_norg(t::Union{CommercialAtSign,Plus,NumberSign}, parents, tokens, i) prev_token = tokens[prevind(tokens, i)] - @debug "Matching" t prev_token if is_sof(prev_token) || is_line_ending(prev_token) - @debug "Prev token is 
ok" tokens[i] prev_token match_norg(tag_to_strategy(t), parents, tokens, i) elseif is_whitespace(prev_token) prev_i = prevind(tokens, i) @@ -459,10 +456,9 @@ function match_norg(t::Union{CommercialAtSign, Plus, NumberSign}, parents, token end function match_norg(::DollarSign, parents, tokens, i) - @debug "bonjour c'est dollarsign" prev_token = tokens[prevind(tokens, i)] m = if is_line_ending(prev_token) || is_sof(prev_token) - match_norg(Definition(), parents, tokens, i) + match_norg(Definition(), parents, tokens, i) else MatchNotFound() end @@ -476,12 +472,11 @@ end function match_norg(::VerticalBar, parents, tokens, i) next_token = tokens[nextind(tokens, i)] prev_token = tokens[prevind(tokens, i)] - @debug "vertical" tokens[i] prev_token next_token if is_sof(prev_token) || is_line_ending(prev_token) match_norg(StandardRangedTag(), parents, tokens, i) elseif kind(next_token) == K"*" match_norg(FreeFormBold(), parents, tokens, i) - elseif kind(next_token) == K"/" + elseif kind(next_token) == K"/" match_norg(FreeFormItalic(), parents, tokens, i) elseif kind(next_token) == K"_" match_norg(FreeFormUnderline(), parents, tokens, i) @@ -507,7 +502,8 @@ function match_norg(::VerticalBar, parents, tokens, i) end function match_norg(::LeftParenthesis, parents, tokens, i) - if is_detached_modifier(first(parents)) || (length(parents) > 1 && is_detached_modifier(parents[2])) + if is_detached_modifier(first(parents)) || + (length(parents) > 1 && is_detached_modifier(parents[2])) match_norg(DetachedModifierExtension(), parents, tokens, i) else MatchNotFound() diff --git a/src/match/rangeable_detached_modifier.jl b/src/match/rangeable_detached_modifier.jl index e7e808b..20f6c59 100644 --- a/src/match/rangeable_detached_modifier.jl +++ b/src/match/rangeable_detached_modifier.jl @@ -5,7 +5,6 @@ rangeable_from_strategy(::Footnote) = K"Footnote" function match_norg(t::T, parents, tokens, i) where {T<:RangeableDetachedModifier} token = tokens[i] - @debug "okay, matching rangeable" token if kind(token) != rangeable_from_token(t) return MatchNotFound() end @@ -14,10 +13,10 @@ function match_norg(t::T, parents, tokens, i) where {T<:RangeableDetachedModifie next_i = nextind(tokens, i) next_token = tokens[next_i] if kind(token) == K"Whitespace" - @debug "haha, whitespace" parents if first(parents) == K"Slide" MatchFound(rangeable_from_strategy(t)) - elseif (K"NestableItem" ∈ parents || AST.is_nestable(first(parents))) && K"Slide" ∉ parents + elseif (K"NestableItem" ∈ parents || AST.is_nestable(first(parents))) && + K"Slide" ∉ parents MatchClosing(first(parents), false) elseif !isdisjoint(parents, KSet"Paragraph ParagraphSegment") MatchClosing(first(parents), false) @@ -32,7 +31,8 @@ function match_norg(t::T, parents, tokens, i) where {T<:RangeableDetachedModifie else if first(parents) == K"Slide" MatchFound(rangeable_from_strategy(t)) - elseif (K"NestableItem" ∈ parents || AST.is_nestable(first(parents))) && K"Slide" ∉ parents + elseif (K"NestableItem" ∈ parents || AST.is_nestable(first(parents))) && + K"Slide" ∉ parents MatchClosing(first(parents), false) elseif !isdisjoint(parents, KSet"Paragraph ParagraphSegment") MatchClosing(first(parents), false) @@ -40,8 +40,9 @@ function match_norg(t::T, parents, tokens, i) where {T<:RangeableDetachedModifie MatchFound(rangeable_from_strategy(t)) end end - elseif kind(token) == rangeable_from_token(t) && kind(next_token) == K"LineEnding" && rangeable_from_strategy(t) ∈ parents - @debug "match ending ranged" + elseif kind(token) == rangeable_from_token(t) && + 
kind(next_token) == K"LineEnding" && + rangeable_from_strategy(t) ∈ parents nextline_i = consume_until(K"LineEnding", tokens, i) token = tokens[nextline_i] nextline_start_i = if kind(token) == K"Whitespace" @@ -50,21 +51,15 @@ function match_norg(t::T, parents, tokens, i) where {T<:RangeableDetachedModifie nextline_i end token = tokens[nextline_start_i] - @debug "next line starts with" token if kind(token) == rangeable_from_token(t) - @debug "start matching the next line" m = match_norg(t, parents, tokens, nextline_start_i) - @debug "stop matching the next line" - @debug "it matches a" m first(parents) rangeable_from_strategy(t) - if isfound(m) && matched(m)==rangeable_from_strategy(t) - @debug "Let's close the current RangeableItem" + if isfound(m) && matched(m) == rangeable_from_strategy(t) MatchClosing(first(parents), true) else - MatchClosing(first(parents), rangeable_from_strategy(t)==first(parents)) + MatchClosing(first(parents), rangeable_from_strategy(t) == first(parents)) end else - @debug "so we close first parent" first(parents) rangeable_from_strategy(t) - MatchClosing(first(parents), rangeable_from_strategy(t)==first(parents)) + MatchClosing(first(parents), rangeable_from_strategy(t) == first(parents)) end else MatchNotFound() diff --git a/src/match/tags.jl b/src/match/tags.jl index 1001487..b8a36fd 100644 --- a/src/match/tags.jl +++ b/src/match/tags.jl @@ -4,15 +4,13 @@ token_tag(::Verbatim) = K"@" token_tag(::StandardRangedTag) = K"|" body(::Verbatim) = K"VerbatimBody" body(::StandardRangedTag) = K"StandardRangedTagBody" -function match_norg(t::T, parents, tokens, i) where {T <: Tag} +function match_norg(t::T, parents, tokens, i) where {T<:Tag} i = nextind(tokens, i) token = tokens[i] - @debug "tag match" parents tokens[i] if kind(token) == K"Word" val = Tokens.value(token) if tag(t) ∈ parents && val == "end" next_token = tokens[nextind(tokens, i)] - @debug "encountered end" token next_token if kind(next_token) ∈ KSet"LineEnding EndOfFile" MatchClosing(tag(t), first(parents) ∈ (tag(t), body(t))) else @@ -22,7 +20,11 @@ function match_norg(t::T, parents, tokens, i) where {T <: Tag} MatchNotFound() elseif kind(first(parents)) ∈ KSet"Slide IndentSegment" MatchFound(tag(t)) - elseif !(is_nestable(first(parents)) || is_heading(first(parents)) || kind(first(parents)) ∈ KSet"NorgDocument StandardRangedTagBody") + elseif !( + is_nestable(first(parents)) || + is_heading(first(parents)) || + kind(first(parents)) ∈ KSet"NorgDocument StandardRangedTagBody" + ) MatchClosing(first(parents), false) else MatchFound(tag(t)) @@ -34,7 +36,6 @@ end function match_norg(::WeakCarryoverTag, parents, tokens, i) token = tokens[nextind(tokens, i)] - @debug "Matching weak carryover tag" if kind(token) == K"Word" nextline = consume_until(K"LineEnding", tokens, i) m = match_norg(parents, tokens, nextline) @@ -50,7 +51,6 @@ end function match_norg(::StrongCarryoverTag, parents, tokens, i) token = tokens[nextind(tokens, i)] - @debug "Matching strong carryover tag" relevant_parents = if K"StandardRangedTag" ∈ parents k = findfirst(parents .== Ref(K"StandardRangedTag"))::Int parents[1:k] @@ -58,7 +58,9 @@ function match_norg(::StrongCarryoverTag, parents, tokens, i) parents end if kind(token) == K"Word" - if is_nestable(first(relevant_parents)) || K"Paragraph" ∈ relevant_parents || K"NestableItem" ∈ relevant_parents + if is_nestable(first(relevant_parents)) || + K"Paragraph" ∈ relevant_parents || + K"NestableItem" ∈ relevant_parents MatchClosing(first(relevant_parents), false) else 
MatchFound(K"StrongCarryoverTag") diff --git a/src/parser/attachedmodifier.jl b/src/parser/attachedmodifier.jl index c00e321..e93af6f 100644 --- a/src/parser/attachedmodifier.jl +++ b/src/parser/attachedmodifier.jl @@ -23,7 +23,9 @@ consumepost(::FreeFormInlineCode) = 2 consumepost(::FreeFormInlineMath) = 2 consumepost(::FreeFormVariable) = 2 -function parse_norg(t::T, parents::Vector{Kind}, tokens::Vector{Token}, i) where {T<:AttachedModifierStrategy} +function parse_norg( + t::T, parents::Vector{Kind}, tokens::Vector{Token}, i +) where {T<:AttachedModifierStrategy} start = i children = AST.Node[] for _ in 1:consumepre(t) @@ -33,10 +35,9 @@ function parse_norg(t::T, parents::Vector{Kind}, tokens::Vector{Token}, i) where m = Match.MatchClosing(node_kind) while !is_eof(tokens[i]) m = match_norg([node_kind, parents...], tokens, i) - @debug "attached modifier loop" m if isclosing(m) if consume(m) && consumepost(t) >= 2 - for _ in 1:(consumepost(t)-1) + for _ in 1:(consumepost(t) - 1) i = nextind(tokens, i) end end @@ -50,21 +51,27 @@ function parse_norg(t::T, parents::Vector{Kind}, tokens::Vector{Token}, i) where push!(children, segment) end end - @debug "hey it's me" m tokens[i] if is_eof(tokens[i]) || (isclosing(m) && matched(m) == K"None") || # Special case for inline code precedence. (isclosing(m) && matched(m) != node_kind && matched(m) ∈ parents) # we've been tricked in thincking we were in a modifier. - new_children = [parse_norg(Word(), parents, tokens, start), first(children).children...] - children[1] = AST.Node(K"ParagraphSegment", new_children, start, AST.stop(first(children))) + new_children = [ + parse_norg(Word(), parents, tokens, start), first(children).children... + ] + children[1] = AST.Node( + K"ParagraphSegment", new_children, start, AST.stop(first(children)) + ) i = prevind(tokens, i) node_kind = K"None" elseif isempty(children) # Empty attached modifiers are forbiddens - children = [parse_norg(Word(), parents, tokens, start), parse_norg(Word(), parents, tokens, i)] + children = [ + parse_norg(Word(), parents, tokens, start), + parse_norg(Word(), parents, tokens, i), + ] node_kind = K"None" elseif isclosing(m) && !consume(m) i = prevind(tokens, i) elseif isclosing(m) && kind(tokens[nextind(tokens, i)]) == K":" i = nextind(tokens, i) end - AST.Node(node_kind, children, start, i) + return AST.Node(node_kind, children, start, i) end diff --git a/src/parser/detachedmodifierextensions.jl b/src/parser/detachedmodifierextensions.jl index 1b15731..b8e8cd5 100644 --- a/src/parser/detachedmodifierextensions.jl +++ b/src/parser/detachedmodifierextensions.jl @@ -1,7 +1,9 @@ -function parse_norg(::DetachedModifierExtension, parents::Vector{Kind}, tokens::Vector{Token}, i) +function parse_norg( + ::DetachedModifierExtension, parents::Vector{Kind}, tokens::Vector{Token}, i +) m = match_norg(DetachedModifierExtension(), parents, tokens, i) if !Match.isfound(m) - return AST.Node(K"None") + return AST.Node(K"None") end extension = matched(m) if extension == K"TodoExtension" @@ -15,14 +17,14 @@ function parse_norg(::DetachedModifierExtension, parents::Vector{Kind}, tokens:: elseif extension == K"StartDateExtension" parse_norg(StartDateExtension(), parents, tokens, i) else - error("Unhandled detached modifier extension. Token $token.") + error("Unhandled detached modifier extension. 
Token $(tokens[i]).") end end function parse_norg(::TodoExtension, parents::Vector{Kind}, tokens::Vector{Token}, i) start = i i = nextind(tokens, i) token = tokens[i] - statusstart=i + statusstart = i if kind(token) == K"Whitespace" status = K"StatusUndone" elseif kind(token) == K"x" @@ -50,9 +52,16 @@ function parse_norg(::TodoExtension, parents::Vector{Kind}, tokens::Vector{Token i = piped.stop end if kind(piped) == K"None" - AST.Node(K"TodoExtension", [AST.Node(status, [], statusstart, statusstart)], start, i) + AST.Node( + K"TodoExtension", [AST.Node(status, [], statusstart, statusstart)], start, i + ) else - AST.Node(K"TodoExtension", [AST.Node(status, [], statusstart, statusstart), piped], start, i) + AST.Node( + K"TodoExtension", + [AST.Node(status, [], statusstart, statusstart), piped], + start, + i, + ) end end diff --git a/src/parser/detachedmodifiersuffix.jl b/src/parser/detachedmodifiersuffix.jl index 36cb9f2..bfc4c81 100644 --- a/src/parser/detachedmodifiersuffix.jl +++ b/src/parser/detachedmodifiersuffix.jl @@ -1,37 +1,35 @@ function parse_norg(::Slide, parents::Vector{Kind}, tokens::Vector{Token}, i) start = i - i = consume_until(K"LineEnding", tokens, i) + i = consume_until(K"LineEnding", tokens, i) p = [K"Slide", parents...] m = match_norg(p, tokens, i) - @debug "ok fréro j'ai ça." tokens[i] m - child = if isfound(m) + children = if isfound(m) if matched(m) == K"Definition" - parse_norg(Definition(), p, tokens, i) + [parse_norg(Definition(), p, tokens, i)] elseif matched(m) == K"Footnote" - parse_norg(Footnote(), p, tokens, i) + [parse_norg(Footnote(), p, tokens, i)] elseif matched(m) == K"Verbatim" - parse_norg(Verbatim(), p, tokens, i) + [parse_norg(Verbatim(), p, tokens, i)] elseif matched(m) == K"StandardRangedTag" - parse_norg(StandardRangedTag(), p, tokens, i) + [parse_norg(StandardRangedTag(), p, tokens, i)] else - parse_norg(Paragraph(), p, tokens, i) + [parse_norg(Paragraph(), p, tokens, i)] end else - parse_norg(parents, tokens, i) + AST.Node[] end - AST.Node(K"Slide", [child], start, AST.stop(child)) + return AST.Node(K"Slide", children, start, AST.stop(last(children))) end function parse_norg(::IndentSegment, parents::Vector{Kind}, tokens::Vector{Token}, i) start = i - i = consume_until(K"LineEnding", tokens, i) + i = consume_until(K"LineEnding", tokens, i) p = [K"IndentSegment", parents...] 
m = Match.MatchClosing(K"IndentSegment") children = [] while !is_eof(tokens[i]) m = match_norg(p, tokens, i) - @debug "indent segment loop" m tokens[i] if isclosing(m) break elseif iscontinue(m) @@ -86,5 +84,5 @@ function parse_norg(::IndentSegment, parents::Vector{Kind}, tokens::Vector{Token if isclosing(m) && !(matched(m) == K"IndentSegment" && consume(m)) i = prevind(tokens, i) end - AST.Node(K"IndentSegment", children, start, i) + return AST.Node(K"IndentSegment", children, start, i) end diff --git a/src/parser/link.jl b/src/parser/link.jl index 010f6e2..3163044 100644 --- a/src/parser/link.jl +++ b/src/parser/link.jl @@ -1,3 +1,5 @@ +limit_tokens(tokens, stop) = [tokens[begin:stop]...; EOFToken()]::Vector{Token} + function parse_norg(::Link, parents::Vector{Kind}, tokens::Vector{Token}, i) start = i i = nextind(tokens, i) @@ -77,12 +79,14 @@ function parse_norg(::URLLocation, parents::Vector{Kind}, tokens::Vector{Token}, i = prevind(tokens, i) end if isclosing(m) && matched(m) != K"URLLocation" && kind(token) != K"}" - p = parse_norg(Paragraph(), parents, [tokens[begin:i]...; EOFToken()], start) - AST.Node(K"None", vcat(getproperty.(p.children, :children)...), start, i) + p = parse_norg(Paragraph(), parents, limit_tokens(tokens, i), start) + AST.Node(K"None", vcat([c.children for c in p.children]...), start, i) else stop = i i = prevind(tokens, i) - AST.Node(K"URLLocation", [AST.Node(K"URLTarget", AST.Node[], start, i)], start, stop) + AST.Node( + K"URLLocation", [AST.Node(K"URLTarget", AST.Node[], start, i)], start, stop + ) end end @@ -99,17 +103,24 @@ function parse_norg(::LineNumberLocation, parents::Vector{Kind}, tokens::Vector{ i = prevind(tokens, i) end if isclosing(m) && matched(m) != K"LineNumberLocation" && kind(token) != K"}" - p = parse_norg(Paragraph(), parents, [tokens[begin:i]...; EOFToken()], start) + p = parse_norg(Paragraph(), parents, limit_tokens(tokens, i), start) - AST.Node(K"None", vcat(getproperty.(p.children, :children)...), start, i) + AST.Node(K"None", vcat([c.children for c in p.children]...), start, i) else stop = i i = prevind(tokens, i) - AST.Node(K"LineNumberLocation", [AST.Node(K"LineNumberTarget", AST.Node[], start, i)], start, stop) + AST.Node( + K"LineNumberLocation", + [AST.Node(K"LineNumberTarget", AST.Node[], start, i)], + start, + stop, + ) end end -function parse_norg(::DetachedModifierLocation, parents::Vector{Kind}, tokens::Vector{Token}, i) +function parse_norg( + ::DetachedModifierLocation, parents::Vector{Kind}, tokens::Vector{Token}, i +) start = i token = tokens[i] if kind(token) == K"*" @@ -133,7 +144,7 @@ function parse_norg(::DetachedModifierLocation, parents::Vector{Kind}, tokens::V K"Heading4" elseif level == 5 K"Heading5" - elseif level >= 6 + else # level >= 6 K"Heading6" end elseif kind(token) == K"$" @@ -156,21 +167,26 @@ function parse_norg(::DetachedModifierLocation, parents::Vector{Kind}, tokens::V if !consume(m) || is_eof(token) i = prevind(tokens, i) end - p = parse_norg(Paragraph(), parents, [tokens[begin:i]...; EOFToken()], start_heading_title) + p = parse_norg(Paragraph(), parents, limit_tokens(tokens, i), start_heading_title) if kind(token) == K"}" children = AST.Node[] - for (i,c) in enumerate(p.children) + for (i, c) in enumerate(p.children) append!(children, c.children) if i < lastindex(p.children) - push!(children, AST.Node(K"WordNode", [], c.stop, c.stop)) + push!(children, AST.Node(K"WordNode", AST.Node[], c.stop, c.stop)) end end content = AST.Node(K"ParagraphSegment", children, p.start, p.stop) 
AST.Node(K"DetachedModifierLocation", [AST.Node(heading_kind), content], start, i) else - c = [AST.Node(K"WordNode", [], j, j) for j ∈ start:(p.start-1)] + c = [AST.Node(K"WordNode", [], j, j) for j in start:(p.start - 1)] children = p.children - ps = AST.Node(K"ParagraphSegment", [c...;children[1].children...], start, children[1].stop) + ps = AST.Node( + K"ParagraphSegment", + AST.Node[c...; children[1].children...], + start, + children[1].stop, + ) children[1] = ps AST.Node(K"None", children, start, i) end @@ -194,10 +210,10 @@ function parse_norg(::MagicLocation, parents::Vector{Kind}, tokens::Vector{Token if !consume(m) || is_eof(token) i = prevind(tokens, i) end - p = parse_norg(Paragraph(), parents, [tokens[begin:i]...; EOFToken()], start_heading_title) + p = parse_norg(Paragraph(), parents, limit_tokens(tokens, i), start_heading_title) if kind(token) == K"}" children = AST.Node[] - for (i,c) in enumerate(p.children) + for (i, c) in enumerate(p.children) append!(children, c.children) if i < lastindex(p.children) push!(children, AST.Node(K"WordNode", [], c.stop, c.stop)) @@ -206,9 +222,14 @@ function parse_norg(::MagicLocation, parents::Vector{Kind}, tokens::Vector{Token content = AST.Node(K"ParagraphSegment", children, p.start, p.stop) AST.Node(K"MagicLocation", [content], start, i) else - c = [AST.Node(K"WordNode", [], j, j) for j ∈ start:(p.start-1)] + c = [AST.Node(K"WordNode", [], j, j) for j in start:(p.start - 1)] children = p.children - ps = AST.Node(K"ParagraphSegment", [c...;children[1].children...], start, children[1].stop) + ps = AST.Node( + K"ParagraphSegment", + AST.Node[c...; children[1].children...], + start, + children[1].stop, + ) children[1] = ps AST.Node(K"None", children, start, i) end @@ -216,7 +237,9 @@ end filelocationkind(::FileLocation) = K"FileLocation" filelocationkind(::NorgFileLocation) = K"NorgFileLocation" -function parse_norg(t::T, parents::Vector{Kind}, tokens::Vector{Token}, i,) where { T <: Union{FileLocation, NorgFileLocation}} +function parse_norg( + t::T, parents::Vector{Kind}, tokens::Vector{Token}, i +) where {T<:Union{FileLocation,NorgFileLocation}} start = i i = nextind(tokens, i) token = tokens[i] @@ -241,9 +264,9 @@ function parse_norg(t::T, parents::Vector{Kind}, tokens::Vector{Token}, i,) wher i = prevind(tokens, i) end if isclosing(m) && matched(m) != filelocationkind(t) && kind(token) != K"}" - p = parse_norg(Paragraph(), parents, [tokens[begin:i]...; EOFToken()], start) + p = parse_norg(Paragraph(), parents, limit_tokens(tokens, i), start) - return AST.Node(K"None", vcat(getproperty.(p.children, :children)...), start, i) + return AST.Node(K"None", vcat([c.children for c in p.children]...), start, i) end if use_neorg_root k = K"FileNorgRootTarget" @@ -267,18 +290,20 @@ function parse_norg(t::T, parents::Vector{Kind}, tokens::Vector{Token}, i,) wher i = prevind(tokens, i) end if isclosing(m) && matched(m) != filelocationkind(t) && kind(token) != K"}" - p = parse_norg(Paragraph(), parents, [tokens[begin:i]...; EOFToken()], start) - return AST.Node(K"None", vcat(getproperty.(p.children, :children)...), start, i) + p = parse_norg(Paragraph(), parents, limit_tokens(tokens, i), start) + return AST.Node( + K"None", vcat([c.children for c in p.children]...), start, i + ) end else i = AST.stop(subtarget) # subtarget = first(children(subtarget)) end end - AST.Node(filelocationkind(t), [file_target, subtarget], start, i) + return AST.Node(filelocationkind(t), [file_target, subtarget], start, i) end -function parse_norg(::WikiLocation, 
parents::Vector{Kind}, tokens::Vector{Token}, i,) +function parse_norg(::WikiLocation, parents::Vector{Kind}, tokens::Vector{Token}, i) start = i i = nextind(tokens, i) token = tokens[i] @@ -296,12 +321,12 @@ function parse_norg(::WikiLocation, parents::Vector{Kind}, tokens::Vector{Token} if !consume(m) || is_eof(token) i = prevind(tokens, i) end - p = parse_norg(Paragraph(), parents, [tokens[begin:i]...; EOFToken()], start_heading_title) + p = parse_norg(Paragraph(), parents, limit_tokens(tokens, i), start_heading_title) subtarget = AST.Node(K"None") content = AST.Node(K"None") if kind(token) ∈ KSet"} :" children = AST.Node[] - for (i,c) in enumerate(p.children) + for (i, c) in enumerate(p.children) append!(children, c.children) if i < lastindex(p.children) push!(children, AST.Node(K"WordNode", [], c.stop, c.stop)) @@ -309,9 +334,14 @@ function parse_norg(::WikiLocation, parents::Vector{Kind}, tokens::Vector{Token} end content = AST.Node(K"ParagraphSegment", children, p.start, p.stop) else - c = [AST.Node(K"WordNode", [], j, j) for j ∈ start:(p.start-1)] + c = [AST.Node(K"WordNode", [], j, j) for j in start:(p.start - 1)] children = p.children - ps = AST.Node(K"ParagraphSegment", [c...;children[1].children...], start, children[1].stop) + ps = AST.Node( + K"ParagraphSegment", + AST.Node[c...; children[1].children...], + start, + children[1].stop, + ) children[1] = ps return AST.Node(K"None", children, start, i) end @@ -328,18 +358,19 @@ function parse_norg(::WikiLocation, parents::Vector{Kind}, tokens::Vector{Token} i = prevind(tokens, i) end if isclosing(m) && matched(m) != K"WikiLocation" && kind(token) != K"}" - p = parse_norg(Paragraph(), parents, [tokens[begin:i]...; EOFToken()], start) - return AST.Node(K"None", vcat(getproperty.(p.children, :children)...), start, i) + p = parse_norg(Paragraph(), parents, limit_tokens(tokens, i), start) + return AST.Node( + K"None", vcat([c.children for c in p.children]...), start, i + ) end else i = AST.stop(subtarget) end end - AST.Node(K"WikiLocation", [content, subtarget], start, i) + return AST.Node(K"WikiLocation", [content, subtarget], start, i) end - -function parse_norg(::TimestampLocation, parents::Vector{Kind}, tokens::Vector{Token}, i,) +function parse_norg(::TimestampLocation, parents::Vector{Kind}, tokens::Vector{Token}, i) start = i i = nextind(tokens, i) token = tokens[i] @@ -347,7 +378,7 @@ function parse_norg(::TimestampLocation, parents::Vector{Kind}, tokens::Vector{T i = nextind(tokens, i) token = tokens[i] end - start_timestamp=i + start_timestamp = i m = match_norg(parents, tokens, i) while !is_eof(token) && !isclosing(m) i = nextind(tokens, i) @@ -358,12 +389,17 @@ function parse_norg(::TimestampLocation, parents::Vector{Kind}, tokens::Vector{T i = prevind(tokens, i) end if isclosing(m) && matched(m) != K"TimestampLocation" && kind(token) != K"}" - p = parse_norg(Paragraph(), parents, [tokens[begin:i]...; EOFToken()], start) - AST.Node(K"None", vcat(getproperty.(p.children, :children)...), start, i) + p = parse_norg(Paragraph(), parents, limit_tokens(tokens, i), start) + AST.Node(K"None", vcat([c.children for c in p.children]...), start, i) else stop = i i = prevind(tokens, i) - AST.Node(K"TimestampLocation", [AST.Node(K"Timestamp", AST.Node[], start_timestamp, i)], start, stop) + AST.Node( + K"TimestampLocation", + [AST.Node(K"Timestamp", AST.Node[], start_timestamp, i)], + start, + stop, + ) end end @@ -377,7 +413,9 @@ function parse_norg(::LinkDescription, parents::Vector{Kind}, tokens::Vector{Tok if isclosing(m) break end - 
segment = parse_norg(ParagraphSegment(), [K"LinkDescription", parents...], tokens, i) + segment = parse_norg( + ParagraphSegment(), [K"LinkDescription", parents...], tokens, i + ) i = nextind(tokens, AST.stop(segment)) if kind(segment) == K"None" append!(children, segment.children) @@ -389,14 +427,18 @@ function parse_norg(::LinkDescription, parents::Vector{Kind}, tokens::Vector{Tok if is_eof(tokens[i]) || (isclosing(m) && matched(m) != K"LinkDescription" && matched(m) ∈ parents) || # we've been tricked in thincking we were in a link description (isclosing(m) && kind(tokens[i]) != K"]") - new_children = [parse_norg(Word(), parents, tokens, start), first(children).children...] - children[1] = AST.Node(K"ParagraphSegment", new_children, start, AST.stop(first(children))) + new_children = [ + parse_norg(Word(), parents, tokens, start), first(children).children... + ] + children[1] = AST.Node( + K"ParagraphSegment", new_children, start, AST.stop(first(children)) + ) i = prevind(tokens, i) node_kind = K"None" elseif isclosing(m) && !consume(m) i = prevind(tokens, i) end - AST.Node(node_kind, children, start, i) + return AST.Node(node_kind, children, start, i) end function parse_norg(::Anchor, parents::Vector{Kind}, tokens::Vector{Token}, i) @@ -437,7 +479,9 @@ function parse_norg(::InlineLinkTarget, parents::Vector{Kind}, tokens::Vector{To if isclosing(m) break end - segment = parse_norg(ParagraphSegment(), [K"InlineLinkTarget", parents...], tokens, i) + segment = parse_norg( + ParagraphSegment(), [K"InlineLinkTarget", parents...], tokens, i + ) i = nextind(tokens, AST.stop(segment)) if kind(segment) == K"None" append!(children, segment.children) @@ -449,12 +493,16 @@ function parse_norg(::InlineLinkTarget, parents::Vector{Kind}, tokens::Vector{To if is_eof(tokens[i]) || (isclosing(m) && matched(m) != K"InlineLinkTarget" && matched(m) ∈ parents) || # we've been tricked in thincking we were in a link description (isclosing(m) && kind(tokens[i]) != K">") - new_children = [parse_norg(Word(), parents, tokens, start), first(children).children...] - children[1] = AST.Node(K"ParagraphSegment", new_children, start, AST.stop(first(children))) + new_children = [ + parse_norg(Word(), parents, tokens, start), first(children).children... + ] + children[1] = AST.Node( + K"ParagraphSegment", new_children, start, AST.stop(first(children)) + ) i = prevind(tokens, i) node_kind = K"None" elseif isclosing(m) && !consume(m) i = prevind(tokens, i) end - AST.Node(node_kind, children, start, i) + return AST.Node(node_kind, children, start, i) end diff --git a/src/parser/nestablemodifier.jl b/src/parser/nestablemodifier.jl index 48d2722..4d58368 100644 --- a/src/parser/nestablemodifier.jl +++ b/src/parser/nestablemodifier.jl @@ -1,4 +1,6 @@ -function parse_norg(::T, parents::Vector{Kind}, tokens::Vector{Token}, i) where {T<:Nestable} +function parse_norg( + ::T, parents::Vector{Kind}, tokens::Vector{Token}, i +) where {T<:Nestable} start = i # TODO: This is innefficient because this match has already been done at this # point, so we could transmit the information through the strategy. 
But this @@ -14,7 +16,6 @@ function parse_norg(::T, parents::Vector{Kind}, tokens::Vector{Token}, i) where end break end - @debug "nestable loop" m tokens[i] child = if kind(matched(m)) == K"WeakCarryoverTag" parse_norg(WeakCarryoverTag(), [nestable_kind, parents...], tokens, i) else @@ -26,7 +27,7 @@ function parse_norg(::T, parents::Vector{Kind}, tokens::Vector{Token}, i) where end push!(children, child) end - AST.Node(nestable_kind, children, start, i) + return AST.Node(nestable_kind, children, start, i) end function parse_norg(::NestableItem, parents::Vector{Kind}, tokens::Vector{Token}, i) @@ -50,7 +51,6 @@ function parse_norg(::NestableItem, parents::Vector{Kind}, tokens::Vector{Token} end while !is_eof(tokens[i]) m = match_norg([K"NestableItem", parents...], tokens, i) - @debug "nestable item loop" m tokens[i] if isclosing(m) if !consume(m) i = prevind(tokens, i) @@ -61,13 +61,15 @@ function parse_norg(::NestableItem, parents::Vector{Kind}, tokens::Vector{Token} if to_parse == K"Verbatim" child = parse_norg(Verbatim(), [K"NestableItem", parents...], tokens, i) elseif to_parse == K"StandardRangedTag" - child = parse_norg(StandardRangedTag(), [K"NestableItem", parents...], tokens, i) + child = parse_norg( + StandardRangedTag(), [K"NestableItem", parents...], tokens, i + ) elseif is_quote(to_parse) - child = parse_norg(Quote(), [K"NestableItem", parents...], tokens, i) + child = parse_norg(Quote(), [K"NestableItem", parents...], tokens, i) elseif is_unordered_list(to_parse) - child = parse_norg(UnorderedList(), [K"NestableItem", parents...], tokens, i) + child = parse_norg(UnorderedList(), [K"NestableItem", parents...], tokens, i) elseif is_ordered_list(to_parse) - child = parse_norg(OrderedList(), [K"NestableItem", parents...], tokens, i) + child = parse_norg(OrderedList(), [K"NestableItem", parents...], tokens, i) elseif to_parse == K"Slide" child = parse_norg(Slide(), [K"NestableItem", parents...], tokens, i) elseif to_parse == K"IndentSegment" @@ -88,5 +90,5 @@ function parse_norg(::NestableItem, parents::Vector{Kind}, tokens::Vector{Token} if is_eof(tokens[i]) i = prevind(tokens, i) end - AST.Node(K"NestableItem", children, start, i) + return AST.Node(K"NestableItem", children, start, i) end diff --git a/src/parser/parser.jl b/src/parser/parser.jl index 741aeb0..cdbc0a7 100644 --- a/src/parser/parser.jl +++ b/src/parser/parser.jl @@ -30,7 +30,9 @@ function parse_norg_toplevel_one_step(parents::Vector{Kind}, tokens::Vector{Toke m = match_norg(parents, tokens, i) to_parse = matched(m) if isclosing(m) - error("Closing token when parsing a top level element at token $(tokens[i]). This is a bug, please report it along with the text you are trying to parse.") + error( + "Closing token when parsing a top level element at token $(tokens[i]). 
This is a bug, please report it along with the text you are trying to parse.", + ) return AST.Node(K"None", AST.Node[], i, nextind(tokens, i)) elseif iscontinue(m) return AST.Node(K"None", AST.Node[], i, i) @@ -40,11 +42,11 @@ function parse_norg_toplevel_one_step(parents::Vector{Kind}, tokens::Vector{Toke stop = prevind(tokens, consume_until(K"LineEnding", tokens, i)) AST.Node(to_parse, AST.Node[], start, stop) elseif is_quote(to_parse) - parse_norg(Quote(), parents, tokens, i) + parse_norg(Quote(), parents, tokens, i) elseif is_unordered_list(to_parse) - parse_norg(UnorderedList(), parents, tokens, i) + parse_norg(UnorderedList(), parents, tokens, i) elseif is_ordered_list(to_parse) - parse_norg(OrderedList(), parents, tokens, i) + parse_norg(OrderedList(), parents, tokens, i) elseif kind(to_parse) == K"Verbatim" parse_norg(Verbatim(), parents, tokens, i) elseif kind(to_parse) == K"StandardRangedTag" @@ -81,7 +83,6 @@ function parse_norg(tokens::Vector{Token}) children = AST.Node[] while !is_eof(tokens[i]) child = parse_norg_toplevel_one_step([K"NorgDocument"], tokens, i) - @debug "toplevel" i child tokens[i] i = AST.stop(child) if !is_eof(tokens[i]) i = nextind(tokens, i) @@ -93,7 +94,7 @@ function parse_norg(tokens::Vector{Token}) root = AST.Node(K"NorgDocument", children, firstindex(tokens), lastindex(tokens)) ast = AST.NorgDocument(root, tokens) findtargets!(ast) - ast + return ast end function parse_norg(::Paragraph, parents::Vector{Kind}, tokens::Vector{Token}, i) @@ -102,7 +103,6 @@ function parse_norg(::Paragraph, parents::Vector{Kind}, tokens::Vector{Token}, i start = i while !is_eof(tokens[i]) m = match_norg([K"Paragraph", parents...], tokens, i) - @debug "paragraph loop" m tokens[i] if isclosing(m) break elseif iscontinue(m) @@ -136,31 +136,33 @@ function parse_norg(::Paragraph, parents::Vector{Kind}, tokens::Vector{Token}, i elseif isclosing(m) && matched(m) != K"Paragraph" i = prevind(tokens, i) end - AST.Node(K"Paragraph", segments, start, i) + return AST.Node(K"Paragraph", segments, start, i) end """ Main dispatch utility. 
""" -function parse_norg_dispatch(to_parse::Kind, parents::Vector{Kind}, tokens::Vector{Token}, i) +function parse_norg_dispatch( + to_parse::Kind, parents::Vector{Kind}, tokens::Vector{Token}, i +) if to_parse == K"Escape" - parse_norg(Escape(), parents, tokens, i) + parse_norg(Escape(), parents, tokens, i) elseif to_parse == K"Bold" - parse_norg(Bold(), parents, tokens, i) + parse_norg(Bold(), parents, tokens, i) elseif to_parse == K"Italic" - parse_norg(Italic(), parents, tokens, i) + parse_norg(Italic(), parents, tokens, i) elseif to_parse == K"Underline" - parse_norg(Underline(), parents, tokens, i) + parse_norg(Underline(), parents, tokens, i) elseif to_parse == K"Strikethrough" - parse_norg(Strikethrough(), parents, tokens, i) + parse_norg(Strikethrough(), parents, tokens, i) elseif to_parse == K"Spoiler" - parse_norg(Spoiler(), parents, tokens, i) + parse_norg(Spoiler(), parents, tokens, i) elseif to_parse == K"Superscript" - parse_norg(Superscript(), parents, tokens, i) + parse_norg(Superscript(), parents, tokens, i) elseif to_parse == K"Subscript" - parse_norg(Subscript(), parents, tokens, i) + parse_norg(Subscript(), parents, tokens, i) elseif to_parse == K"InlineCode" - parse_norg(InlineCode(), parents, tokens, i) + parse_norg(InlineCode(), parents, tokens, i) elseif to_parse == K"NullModifier" parse_norg(NullModifier(), parents, tokens, i) elseif to_parse == K"InlineMath" @@ -168,21 +170,21 @@ function parse_norg_dispatch(to_parse::Kind, parents::Vector{Kind}, tokens::Vect elseif to_parse == K"Variable" parse_norg(Variable(), parents, tokens, i) elseif to_parse == K"FreeFormBold" - parse_norg(FreeFormBold(), parents, tokens, i) + parse_norg(FreeFormBold(), parents, tokens, i) elseif to_parse == K"FreeFormItalic" - parse_norg(FreeFormItalic(), parents, tokens, i) + parse_norg(FreeFormItalic(), parents, tokens, i) elseif to_parse == K"FreeFormUnderline" - parse_norg(FreeFormUnderline(), parents, tokens, i) + parse_norg(FreeFormUnderline(), parents, tokens, i) elseif to_parse == K"FreeFormStrikethrough" - parse_norg(FreeFormStrikethrough(), parents, tokens, i) + parse_norg(FreeFormStrikethrough(), parents, tokens, i) elseif to_parse == K"FreeFormSpoiler" - parse_norg(FreeFormSpoiler(), parents, tokens, i) + parse_norg(FreeFormSpoiler(), parents, tokens, i) elseif to_parse == K"FreeFormSuperscript" - parse_norg(FreeFormSuperscript(), parents, tokens, i) + parse_norg(FreeFormSuperscript(), parents, tokens, i) elseif to_parse == K"FreeFormSubscript" - parse_norg(FreeFormSubscript(), parents, tokens, i) + parse_norg(FreeFormSubscript(), parents, tokens, i) elseif to_parse == K"FreeFormInlineCode" - parse_norg(FreeFormInlineCode(), parents, tokens, i) + parse_norg(FreeFormInlineCode(), parents, tokens, i) elseif to_parse == K"FreeFormNullModifier" parse_norg(FreeFormNullModifier(), parents, tokens, i) elseif to_parse == K"FreeFormInlineMath" @@ -190,7 +192,7 @@ function parse_norg_dispatch(to_parse::Kind, parents::Vector{Kind}, tokens::Vect elseif to_parse == K"FreeFormVariable" parse_norg(FreeFormVariable(), parents, tokens, i) elseif to_parse == K"Link" - parse_norg(Link(), parents, tokens, i) + parse_norg(Link(), parents, tokens, i) elseif to_parse == K"Anchor" parse_norg(Anchor(), parents, tokens, i) elseif to_parse == K"InlineLinkTarget" @@ -198,7 +200,9 @@ function parse_norg_dispatch(to_parse::Kind, parents::Vector{Kind}, tokens::Vect elseif to_parse == K"Word" parse_norg(Word(), parents, tokens, i) else - error("parse_norg_dispatch got an unhandled node kind $to_parse for 
token $(tokens[i])") + error( + "parse_norg_dispatch got an unhandled node kind $to_parse for token $(tokens[i])", + ) end end @@ -207,10 +211,9 @@ function parse_norg(::ParagraphSegment, parents::Vector{Kind}, tokens::Vector{To children = AST.Node[] m = Match.MatchClosing(K"ParagraphSegment") parents = [K"ParagraphSegment", parents...] - siblings = [] + siblings = AST.Node[] while !is_eof(tokens[i]) m = match_norg(parents, tokens, i) - @debug "ps loop" m if isclosing(m) break elseif iscontinue(m) @@ -246,7 +249,9 @@ function parse_norg(::ParagraphSegment, parents::Vector{Kind}, tokens::Vector{To elseif AST.start(first(siblings)) == start AST.Node(K"None", siblings, start, i) else - ps = AST.Node(K"ParagraphSegment", vcat(children, first(siblings).children), start, i) + ps = AST.Node( + K"ParagraphSegment", vcat(children, first(siblings).children), start, i + ) if length(siblings) > 1 AST.Node(K"None", [ps, siblings[2:end]...], start, i) else @@ -258,11 +263,11 @@ end function parse_norg(::Escape, parents::Vector{Kind}, tokens::Vector{Token}, i) next_i = nextind(tokens, i) w = parse_norg(Word(), parents, tokens, next_i) - AST.Node(K"Escape", AST.Node[w], i, next_i) + return AST.Node(K"Escape", AST.Node[w], i, next_i) end function parse_norg(::Word, parents::Vector{Kind}, tokens::Vector{Token}, i) - AST.Node(K"WordNode", AST.Node[], i, i) + return AST.Node(K"WordNode", AST.Node[], i, i) end include("attachedmodifier.jl") diff --git a/src/parser/rangeabledetachedmodifier.jl b/src/parser/rangeabledetachedmodifier.jl index 210ffcf..c56b3dd 100644 --- a/src/parser/rangeabledetachedmodifier.jl +++ b/src/parser/rangeabledetachedmodifier.jl @@ -1,28 +1,28 @@ strategy_to_kind(::Definition) = K"Definition" strategy_to_kind(::Footnote) = K"Footnote" -function parse_norg(t::RangeableDetachedModifier, parents::Vector{Kind}, tokens::Vector{Token}, i) +function parse_norg( + t::RangeableDetachedModifier, parents::Vector{Kind}, tokens::Vector{Token}, i +) start = i parents = [strategy_to_kind(t), parents...] 
- children = [] + children = AST.Node[] while !is_eof(tokens[i]) - @debug "Ranged mainloop" tokens[i] m = match_norg(parents, tokens, i) - @debug "Ranged matched" m if isclosing(m) if !consume(m) i = prevind(tokens, i) else stop = prevind(tokens, consume_until(K"LineEnding", tokens, i)) - @debug "Consuming until" tokens[stop] if !isempty(children) child = last(children) - children[end] = AST.Node(K"RangeableItem", child.children, AST.start(child), stop) + children[end] = AST.Node( + K"RangeableItem", child.children, AST.start(child), stop + ) end i = stop end break elseif matched(m) ∉ KSet"WeakCarryoverTag RangeableItem" - @debug "Hugo, I'm leaving on" tokens[i] i = prevind(tokens, i) break end @@ -38,7 +38,7 @@ function parse_norg(t::RangeableDetachedModifier, parents::Vector{Kind}, tokens: push!(children, child) end - AST.Node(strategy_to_kind(t), children, start, i) + return AST.Node(strategy_to_kind(t), children, start, i) end function parse_norg(::RangeableItem, parents::Vector{Kind}, tokens::Vector{Token}, i) @@ -60,15 +60,15 @@ function parse_norg(::RangeableItem, parents::Vector{Kind}, tokens::Vector{Token end function parse_norg_unranged_rangeable(parents, tokens, i) - @debug "unranged rangeable" parents tokens[i] title_segment = parse_norg(ParagraphSegment(), parents, tokens, i) - paragraph = parse_norg(Paragraph(), parents, tokens, nextind(tokens, AST.stop(title_segment))) + paragraph = parse_norg( + Paragraph(), parents, tokens, nextind(tokens, AST.stop(title_segment)) + ) - AST.Node(K"RangeableItem", [title_segment, paragraph], i, AST.stop(paragraph)) + return AST.Node(K"RangeableItem", [title_segment, paragraph], i, AST.stop(paragraph)) end function parse_norg_ranged_rangeable(parents, tokens, i) - @debug "ranged rangeable" parents tokens[i] start = i title_segment = parse_norg(ParagraphSegment(), parents, tokens, i) children = [] @@ -76,13 +76,10 @@ function parse_norg_ranged_rangeable(parents, tokens, i) token = tokens[i] while !is_eof(token) m = match_norg(parents, tokens, i) - @debug "ranged item loop" token m - if isclosing(m) - @debug "ok, closing ranged item" m tokens[i] + if isclosing(m) if consume(m) i = consume_until(K"LineEnding", tokens, i) i = prevind(tokens, i) - @debug "consuming until" i tokens[i] else i = prevind(tokens, i) end @@ -106,7 +103,7 @@ function parse_norg_ranged_rangeable(parents, tokens, i) else parse_norg(Paragraph(), parents, tokens, i) end - push!(children, child) + push!(children, child) i = nextind(tokens, AST.stop(child)) token = tokens[i] if is_eof(token) @@ -114,5 +111,7 @@ function parse_norg_ranged_rangeable(parents, tokens, i) token = tokens[i] end end - AST.Node(K"RangeableItem", [title_segment, children...], start, i) + return AST.Node( + K"RangeableItem", [title_segment, children...]::Vector{AST.Node}, start, i + ) end diff --git a/src/parser/structuralmodifier.jl b/src/parser/structuralmodifier.jl index 740ce41..e033439 100644 --- a/src/parser/structuralmodifier.jl +++ b/src/parser/structuralmodifier.jl @@ -12,7 +12,7 @@ function parse_norg(::Heading, parents::Vector{Kind}, tokens::Vector{Token}, i) token = tokens[i] heading_level += 1 end - heading_kind = AST.heading_level(heading_level) + heading_kind = AST.heading_kind(heading_level) if is_whitespace(token) i = nextind(tokens, i) m = match_norg([heading_kind, parents...], tokens, i) @@ -25,12 +25,13 @@ function parse_norg(::Heading, parents::Vector{Kind}, tokens::Vector{Token}, i) i = consume_until(K"Whitespace", tokens, i) end end - title_segment = 
parse_norg(ParagraphSegment(), [heading_kind, parents...], tokens, i) + title_segment = parse_norg( + ParagraphSegment(), [heading_kind, parents...], tokens, i + ) push!(children, title_segment) i = nextind(tokens, AST.stop(title_segment)) while !is_eof(tokens[i]) m = match_norg([heading_kind, parents...], tokens, i) - @debug "heading loop" m tokens[i] if isclosing(m) break end @@ -40,7 +41,9 @@ function parse_norg(::Heading, parents::Vector{Kind}, tokens::Vector{Token}, i) elseif to_parse == K"WeakDelimitingModifier" start_del = i i = consume_until(K"LineEnding", tokens, i) - push!(children, AST.Node(K"WeakDelimitingModifier", AST.Node[], start_del, i)) + push!( + children, AST.Node(K"WeakDelimitingModifier", AST.Node[], start_del, i) + ) break elseif kind(to_parse) == K"StrongDelimitingModifier" i = prevind(tokens, i) @@ -58,7 +61,9 @@ function parse_norg(::Heading, parents::Vector{Kind}, tokens::Vector{Token}, i) elseif kind(to_parse) == K"Verbatim" child = parse_norg(Verbatim(), [heading_kind, parents...], tokens, i) elseif kind(to_parse) == K"StandardRangedTag" - child = parse_norg(StandardRangedTag(), [heading_kind, parents...], tokens, i) + child = parse_norg( + StandardRangedTag(), [heading_kind, parents...], tokens, i + ) elseif to_parse == K"WeakCarryoverTag" child = parse_norg(WeakCarryoverTag(), parents, tokens, i) elseif to_parse == K"Definition" @@ -81,11 +86,11 @@ function parse_norg(::Heading, parents::Vector{Kind}, tokens::Vector{Token}, i) if isclosing(m) && !(matched(m) == heading_kind && consume(m)) i = prevind(tokens, i) end - + AST.Node(heading_kind, children, start, i) else # if the stars are not followed by a whitespace # This should never happen if matching works correctly - # parse_norg(Paragraph(), parents, tokens, i) + # parse_norg(Paragraph(), parents, tokens, i) error("Matching for headings has a bug. 
Please report the issue.") end end diff --git a/src/parser/tag.jl b/src/parser/tag.jl index 542840e..85005ce 100644 --- a/src/parser/tag.jl +++ b/src/parser/tag.jl @@ -21,7 +21,6 @@ function parse_tag_header(parents::Vector{Kind}, tokens::Vector{Token}, i) token = tokens[i] end end - @debug "coucou" token if kind(token) == K"Whitespace" i = nextind(tokens, i) token = tokens[i] @@ -29,7 +28,10 @@ function parse_tag_header(parents::Vector{Kind}, tokens::Vector{Token}, i) start_current = i while !is_eof(tokens[i]) && kind(token) != K"LineEnding" if is_whitespace(token) - push!(children, AST.Node(K"TagParameter", AST.Node[], start_current, prevind(tokens, i))) + push!( + children, + AST.Node(K"TagParameter", AST.Node[], start_current, prevind(tokens, i)), + ) i = nextind(tokens, i) start_current = i token = tokens[i] @@ -44,12 +46,15 @@ function parse_tag_header(parents::Vector{Kind}, tokens::Vector{Token}, i) end if kind(token) == K"LineEnding" if start_current < i - push!(children, AST.Node(K"TagParameter", AST.Node[], start_current, prevind(tokens, i))) + push!( + children, + AST.Node(K"TagParameter", AST.Node[], start_current, prevind(tokens, i)), + ) end i = nextind(tokens, i) token = tokens[i] end - children, i + return children, i end tag(::Verbatim) = K"Verbatim" @@ -57,7 +62,7 @@ tag(::StandardRangedTag) = K"StandardRangedTag" body(::Verbatim) = K"VerbatimBody" body(::StandardRangedTag) = K"StandardRangedTagBody" -function parse_norg(t::T, parents::Vector{Kind}, tokens::Vector{Token}, i) where {T <: Tag} +function parse_norg(t::T, parents::Vector{Kind}, tokens::Vector{Token}, i) where {T<:Tag} start = i children, i = parse_tag_header(parents, tokens, i) token = tokens[i] @@ -65,17 +70,13 @@ function parse_norg(t::T, parents::Vector{Kind}, tokens::Vector{Token}, i) where stop_content = i p = [body(t), tag(t), parents...] 
body_children = AST.Node[] - @debug "tag parsing" start start_content tokens[i] while !is_eof(tokens[i]) m = match_norg(p, tokens, i) - @debug "tag loop" m tokens[i] if isclosing(m) - @debug "Closing tag" m tokens[i] stop_content = prevind(tokens, i) if kind(tokens[i]) == K"LineEnding" i = nextind(tokens, i) end - @debug "after advancing" tokens[i] i = consume_until(K"LineEnding", tokens, i) if tokens[i] != K"EndOfFile" i = prevind(tokens, i) @@ -90,20 +91,22 @@ function parse_norg(t::T, parents::Vector{Kind}, tokens::Vector{Token}, i) where i = nextind(tokens, AST.stop(c)) end push!(children, AST.Node(body(t), body_children, start_content, stop_content)) - @debug "Closed tag" i tokens[i] parents - AST.Node(tag(t), children, start, i) + return AST.Node(tag(t), children, start, i) end function parse_norg(::WeakCarryoverTag, parents::Vector{Kind}, tokens::Vector{Token}, i) start = i children, i = parse_tag_header(parents, tokens, i) - @debug "Weak carryover tag here" tokens[i] - content = parse_norg_toplevel_one_step([parents...], tokens, i) - @debug "hey there" content parents + content = parse_norg_toplevel_one_step(parents, tokens, i) if kind(content) == K"Paragraph" || is_nestable(kind(content)) content_children = content.children first_segment = first(content_children) - content_children[1] = AST.Node(K"WeakCarryoverTag", [children..., first_segment], start, AST.stop(first_segment)) + content_children[1] = AST.Node( + K"WeakCarryoverTag", + [children..., first_segment], + start, + AST.stop(first_segment), + ) AST.Node(kind(content), content_children, AST.start(content), AST.stop(content)) else AST.Node(K"WeakCarryoverTag", [children..., content], start, AST.stop(content)) @@ -113,13 +116,11 @@ end function parse_norg(::StrongCarryoverTag, parents::Vector{Kind}, tokens::Vector{Token}, i) start = i children, i = parse_tag_header(parents, tokens, i) - @debug "Strong carryover tag here" tokens[i] m = match_norg(parents, tokens, i) if isclosing(m) AST.Node(K"StrongCarryoverTag", children, start, prevind(tokens, i)) else - content = parse_norg_toplevel_one_step([parents...], tokens, i) - @debug "hey there" content parents + content = parse_norg_toplevel_one_step(parents, tokens, i) AST.Node(K"StrongCarryoverTag", [children..., content], start, AST.stop(content)) end end diff --git a/src/parser/verbatim.jl b/src/parser/verbatim.jl index e69de29..8b13789 100644 --- a/src/parser/verbatim.jl +++ b/src/parser/verbatim.jl @@ -0,0 +1 @@ + diff --git a/src/scanners.jl b/src/scanners.jl index 6c88670..feb173b 100644 --- a/src/scanners.jl +++ b/src/scanners.jl @@ -16,7 +16,12 @@ matching token was found. struct ScanResult length::Int64 end -ScanResult(res::Bool) = if res ScanResult(1) else ScanResult(0) end +ScanResult(res::Bool) = + if res + ScanResult(1) + else + ScanResult(0) + end success(scanresult::ScanResult) = scanresult.length > 0 abstract type ScanStrategy end @@ -60,7 +65,7 @@ function scan(list::AbstractArray, input) break end end - res + return res end function scan(set::Set{Char}, input) @@ -81,7 +86,7 @@ function scan(::Whitespace, input) break end end - ScanResult(trial_stop) + return ScanResult(trial_stop) end function scan(::Word, input) @@ -97,7 +102,7 @@ function scan(::Word, input) break end end - ScanResult(trial_stop) + return ScanResult(trial_stop) end scan(::LineEnding, input) = scan(NORG_LINE_ENDING, input) @@ -123,8 +128,12 @@ end All the registered [`Kinds.Kind`](@ref) that [`Scanners.scan`](@ref) will try when consuming entries. 
""" const TOKENKIND_PARSING_ORDER = [ - Kinds.all_single_punctuation_tokens()...; - K"x"; K"LineEnding"; K"Whitespace"; K"Punctuation"; K"Word" + Kinds.all_single_punctuation_tokens()... + K"x" + K"LineEnding" + K"Whitespace" + K"Punctuation" + K"Word" ] """ @@ -148,7 +157,7 @@ function scan(input; line=0, charnum=0) if !success(res) error("No suitable token found for input at line $line, char $charnum") end - Token(tokentype, line, charnum, input[1:res.length]) + return Token(tokentype, line, charnum, input[1:(res.length)]) end export scan diff --git a/src/semantics/timestamps.jl b/src/semantics/timestamps.jl index 7dcb6ff..d3b0304 100644 --- a/src/semantics/timestamps.jl +++ b/src/semantics/timestamps.jl @@ -1,5 +1,4 @@ -using Dates, TimeZones - +using Dates """ parse_norg_timestamp(tokens, start, stop) @@ -18,7 +17,7 @@ Example usage: """ function parse_norg_timestamp(tokens, start, stop) i, t1 = parse_one_norg_timestamp(tokens, start, stop) - if kind(tokens[i]) == K"-" || (i <= stop && kind(tokens[i+1]) == K"-") + if kind(tokens[i]) == K"-" || (i <= stop && kind(tokens[i + 1]) == K"-") if kind(tokens[i]) != K"-" i += 1 end @@ -29,11 +28,10 @@ function parse_norg_timestamp(tokens, start, stop) i, t2 = parse_one_norg_timestamp(tokens, i, stop) t1, t2 = complete_timestamps(t1, t2) t1, t2 = to_datetime(t1), to_datetime(t2) - (;t1, t2) + (; t1, t2) else (t1=to_datetime(t1), t2=nothing) end - end function to_datetime(t) @@ -44,11 +42,11 @@ function to_datetime(t) end stop = findfirst(isnothing.(args)) if !isnothing(stop) - args = args[1:stop-1] + args = args[1:(stop - 1)] end if isempty(args) return nothing - end + end dt = DateTime(args...) if isnothing(t.timezone) dt @@ -75,7 +73,7 @@ function complete_timestamps(t1, t2) dt2[field] = f1 end end - (t1=NamedTuple(dt1), t2=NamedTuple(dt2)) + return (t1=NamedTuple(dt1), t2=NamedTuple(dt2)) end function warn_if_no_separator(param, tokens, i, stop) @@ -86,7 +84,9 @@ function warn_if_no_separator(param, tokens, i, stop) end function parse_one_norg_timestamp_should_return(tokens, i, stop) - i >= stop || kind(tokens[i]) == K"-" || (i <= stop && kind(tokens[i+1]) == K"-") + return i >= stop || + kind(tokens[i]) == K"-" || + (i <= stop && kind(tokens[i + 1]) == K"-") end function parse_one_norg_timestamp(tokens, start, stop) @@ -105,13 +105,13 @@ function parse_one_norg_timestamp(tokens, start, stop) i = nextind(tokens, i) token = tokens[i] if parse_one_norg_timestamp_should_return(tokens, i, stop) - return i, (;day, day_of_month, month, year, time, timezone) + return i, (; day, day_of_month, month, year, time, timezone) elseif kind(token) == K"," i = nextind(tokens, i) token = tokens[i] end if parse_one_norg_timestamp_should_return(tokens, i, stop) - return i, (;day, day_of_month, month, year, time, timezone) + return i, (; day, day_of_month, month, year, time, timezone) else warn_if_no_separator("Day", tokens, i, stop) i = nextind(tokens, i) @@ -125,7 +125,7 @@ function parse_one_norg_timestamp(tokens, start, stop) i = nextind(tokens, i) token = tokens[i] if parse_one_norg_timestamp_should_return(tokens, i, stop) - return i, (;day, day_of_month, month, year, time, timezone) + return i, (; day, day_of_month, month, year, time, timezone) else warn_if_no_separator("Day of the month", tokens, i, stop) i = nextind(tokens, i) @@ -138,7 +138,7 @@ function parse_one_norg_timestamp(tokens, start, stop) i = nextind(tokens, i) token = tokens[i] if parse_one_norg_timestamp_should_return(tokens, i, stop) - return i, (;day, day_of_month, month, year, time, 
timezone) + return i, (; day, day_of_month, month, year, time, timezone) else warn_if_no_separator("Month", tokens, i, stop) i = nextind(tokens, i) @@ -151,7 +151,7 @@ function parse_one_norg_timestamp(tokens, start, stop) i = nextind(tokens, i) token = tokens[i] if parse_one_norg_timestamp_should_return(tokens, i, stop) - return i, (;day, day_of_month, month, year, time, timezone) + return i, (; day, day_of_month, month, year, time, timezone) else warn_if_no_separator("Year", tokens, i, stop) i = nextind(tokens, i) @@ -163,9 +163,9 @@ function parse_one_norg_timestamp(tokens, start, stop) s = join(value.(tokens[i:next_space])) time = tryparse(Time, s, dateformat"HH:MM.SS") if !isnothing(time) - i = next_space+1 + i = next_space + 1 if parse_one_norg_timestamp_should_return(tokens, i, stop) - return i, (;day, day_of_month, month, year, time, timezone) + return i, (; day, day_of_month, month, year, time, timezone) else warn_if_no_separator("Time", tokens, i, stop) i = nextind(tokens, i) @@ -174,22 +174,14 @@ function parse_one_norg_timestamp(tokens, start, stop) end end if i <= stop - stop_timestamp = Parser.consume_until(KSet"Whitespace -", tokens, i)-2 + stop_timestamp = Parser.consume_until(KSet"Whitespace -", tokens, i) - 2 if stop_timestamp <= stop w = join(value.(tokens[i:stop_timestamp])) - try - timezone = TimeZone(w) - catch e - if e isa ArgumentError - @warn "Unable to process timezone" w tokens[i] - else - rethrow(e) - end - end + timezone = parse_timezone(w) i = stop_timestamp + 1 end end - return i, (;day, day_of_month, month, year, time, timezone) + return i, (; day, day_of_month, month, year, time, timezone) end function parse_day(tokens, start, _) @@ -239,5 +231,13 @@ end function parse_year(tokens, start, _) token = tokens[start] w = value(token) - tryparse(Int64, w) + return tryparse(Int64, w) +end + +function parse_timezone(w) + if HAS_TIMEZONE_CAPABILITIES + parse_timezone(Val(:extension), w) + else + nothing + end end diff --git a/src/strategy.jl b/src/strategy.jl index b3cfa9f..cb55fc9 100644 --- a/src/strategy.jl +++ b/src/strategy.jl @@ -78,12 +78,17 @@ struct FreeFormInlineMath <: VerbatimAttachedModifierStrategy end struct FreeFormVariable <: VerbatimAttachedModifierStrategy end const FreeFormAttachedModifier = Union{ - FreeFormBold, FreeFormItalic, - FreeFormUnderline, FreeFormStrikethrough, - FreeFormSpoiler, FreeFormSuperscript, - FreeFormSubscript, FreeFormNullModifier, - FreeFormInlineCode, FreeFormInlineMath, - FreeFormVariable + FreeFormBold, + FreeFormItalic, + FreeFormUnderline, + FreeFormStrikethrough, + FreeFormSpoiler, + FreeFormSuperscript, + FreeFormSubscript, + FreeFormNullModifier, + FreeFormInlineCode, + FreeFormInlineMath, + FreeFormVariable, } struct Anchor <: FromNode end @@ -140,23 +145,23 @@ export Heading, HeadingTitle, DelimitingModifier, StrongDelimiter export WeakDelimiter, HorizontalRule, Nestable, UnorderedList, OrderedList export Quote, NestableItem export Tag, Verbatim, WeakCarryoverTag, StrongCarryoverTag, StandardRangedTag -export AttachedModifierStrategy, VerbatimAttachedModifierStrategy, Bold, Italic -export Underline, Strikethrough, Spoiler, Superscript, Subscript, InlineCode +export AttachedModifierStrategy, VerbatimAttachedModifierStrategy, Bold, Italic +export Underline, Strikethrough, Spoiler, Superscript, Subscript, InlineCode export NullModifier, InlineMath, Variable export FreeFormBold, FreeFormItalic, FreeFormUnderline, FreeFormStrikethrough export FreeFormSpoiler, FreeFormSuperscript, FreeFormSubscript, FreeFormInlineCode 
export FreeFormNullModifier, FreeFormInlineMath, FreeFormVariable, FreeFormAttachedModifier -export Anchor, Link, LinkLocation, URLLocation, LineNumberLocation -export DetachedModifierLocation, FileLocation, MagicLocation, NorgFileLocation -export WikiLocation, TimestampLocation, LinkDescription, LinkSubTarget, InlineLinkTarget +export Anchor, Link, LinkLocation, URLLocation, LineNumberLocation +export DetachedModifierLocation, FileLocation, MagicLocation, NorgFileLocation +export WikiLocation, TimestampLocation, LinkDescription, LinkSubTarget, InlineLinkTarget export RangeableDetachedModifier, Definition, Footnote, RangeableItem export DetachedModifierSuffix, Slide, IndentSegment export DetachedModifierExtension -export TodoExtension, TimestampExtension, PriorityExtension, DueDateExtension, StartDateExtension +export TodoExtension, + TimestampExtension, PriorityExtension, DueDateExtension, StartDateExtension export StatusUndone, StatusDone, StatusNeedFurtherInput, StatusUrgent export StatusRecurring, StatusInProgress, StatusOnHold, StatusCancelled export ParagraphSegment, Paragraph, Escape end - diff --git a/src/tokenize.jl b/src/tokenize.jl index b3ca054..c07a3f1 100644 --- a/src/tokenize.jl +++ b/src/tokenize.jl @@ -19,7 +19,7 @@ function tokenize(input::AbstractString) result = [SOFToken()] while i <= lastindex(input) sub = SubString(input, i) - token = Scanners.scan(sub, line = linenum, charnum = charnum) + token = Scanners.scan(sub; line=linenum, charnum=charnum) if is_line_ending(token) linenum += 1 charnum = 1 @@ -30,7 +30,7 @@ function tokenize(input::AbstractString) i = nextind(input, i, length(token)) end push!(result, EOFToken()) - result + return result end export tokenize diff --git a/src/tokens.jl b/src/tokens.jl index a953e0b..1240d54 100644 --- a/src/tokens.jl +++ b/src/tokens.jl @@ -45,11 +45,13 @@ end Create a `Token` of kind `kind` with value `value` at `line` and char number `char`. """ function Token(kind, line, char, value) - Token(kind, TokenPosition(line, char), value) + return Token(kind, TokenPosition(line, char), value) end function Base.show(io::IO, token::Token) - print(io, - "$(kind(token)): $(repr(value(token))), line $(line(token)) col. $(char(token))") + return print( + io, + "Token(K\"$(string(kind(token)))\", \"$(value(token))\", line $(string(line(token))), col. $(string(char(token))))", + ) end SOFToken() = Token(K"StartOfFile", 0, 0, SubString("")) EOFToken() = Token(K"EndOfFile", 0, 0, SubString("")) diff --git a/src/utils.jl b/src/utils.jl index b751a95..bf130c3 100644 --- a/src/utils.jl +++ b/src/utils.jl @@ -13,7 +13,7 @@ function consume_until(k::Kind, tokens::Vector{Token}, i) if kind(token) == k i = nextind(tokens, i) end - i + return i end function consume_until(k, tokens::Vector{Token}, i) token = tokens[i] @@ -24,7 +24,7 @@ function consume_until(k, tokens::Vector{Token}, i) if kind(token) ∈ k i = nextind(tokens, i) end - i + return i end """ @@ -34,19 +34,19 @@ Make some text suitable for using it as an id in a document. """ function idify(text) words = map(lowercase, split(text, r"\W+")) - join(filter(!isempty, words), '-') + return join(filter(!isempty, words), '-') end """ - textify(ast, node) + textify(ast, node, escape=identity) -Return the raw text associated with a node. +Return the raw text associated with a node. You can specify an escape function. 
""" -function textify(ast::NorgDocument, node::Node) +function textify(ast::NorgDocument, node::Node, escape=identity) if is_leaf(node) - AST.litteral(ast, node) + escape(AST.litteral(ast, node)) else - join(textify(ast, c) for c in children(node)) + join(textify(ast, c, escape) for c in children(node)) end end @@ -58,30 +58,18 @@ Return all children and grandchildren of kind `k`. It can also `exclude` certain nodes from recursion. """ function getchildren(node::Node, k::Kind) - filter( - x->kind(x)==k, - collect(PreOrderDFS( - x->kind(x)!=k, - node - )) - ) + return filter(x -> kind(x) == k, collect(PreOrderDFS(x -> kind(x) != k, node))) end function getchildren(node::Node, k::Kind, exclude::Kind) - filter( - x->kind(x)==k, - collect(PreOrderDFS( - x->kind(x)!=k && kind(x)!=exclude, - node - )) + return filter( + x -> kind(x) == k, + collect(PreOrderDFS(x -> kind(x) != k && kind(x) != exclude, node)), ) end function getchildren(node::Node, k::Kind, exclude) - filter( - x->kind(x)==k, - collect(PreOrderDFS( - x->kind(x)!=k && kind(x)∉exclude, - node - )) + return filter( + x -> kind(x) == k, + collect(PreOrderDFS(x -> kind(x) != k && kind(x) ∉ exclude, node)), ) end @@ -96,18 +84,21 @@ attribute of the AST first. """ function findtargets!(ast::NorgDocument) empty!(ast.targets) - for c in children(ast.root) - map(PreOrderDFS(x->kind(x) ∉ KSet"Link Anchor", c)) do n - findtargets!(ast, n) + stack = copy(children(ast.root)) + while !isempty(stack) + c = pop!(stack) + findtargets!(ast, c) + if kind(c) ∉ KSet"Link Anchor" + append!(stack, children(c)) end end end function findtargets!(ast::NorgDocument, node::Node) if AST.is_heading(node) - push!(ast.targets, textify(ast, first(children(node)))=>(kind(node), Ref(node))) + push!(ast.targets, textify(ast, first(children(node))) => (kind(node), Ref(node))) elseif kind(node) ∈ KSet"Definition Footnote" for c in children(node) - push!(ast.targets, textify(ast, first(children(c)))=>(kind(node), Ref(c))) + push!(ast.targets, textify(ast, first(children(c))) => (kind(node), Ref(c))) end end end diff --git a/test/Project.toml b/test/Project.toml index 7d9f2f7..4ffb13d 100644 --- a/test/Project.toml +++ b/test/Project.toml @@ -1,7 +1,8 @@ [deps] AbstractTrees = "1520ce14-60c1-5f80-bbc7-55ef81b5835c" +Aqua = "4c88cf16-eb10-579e-8560-4a9242c79595" Gumbo = "708ec375-b3d6-5a57-a7ce-8257bf98657a" +JET = "c3a54625-cd67-489e-a8e7-0a5a0ff4e31b" JSON = "682c06a0-de6a-54ab-a142-c8b1cf79cde6" -OrderedCollections = "bac558e1-5e72-5ebc-8fee-abe8a469f55d" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" pandoc_jll = "c5432543-76ad-5c9d-82bf-db097047a5e2" diff --git a/test/ast_tests/test_detached_modifier_extension.jl b/test/ast_tests/test_detached_modifier_extension.jl index c94cca7..7997164 100644 --- a/test/ast_tests/test_detached_modifier_extension.jl +++ b/test/ast_tests/test_detached_modifier_extension.jl @@ -14,9 +14,9 @@ todos = [ ("_", K"StatusCancelled") ] -@testset "Extension on detached modifier '$m'." for m in detached_modifier +@testset "Extension on detached modifier '$m'." 
for m in detached_modifier @testset "Level" for n in 1:7 - @testset "Simple Todos: ($t)" for (t,res) in todos + @testset "Simple Todos: ($t)" for (t, res) in todos s = "$(repeat(m, n)) ($t) hey" ast = norg(s) nestable = first(children(ast.root)) @@ -64,7 +64,7 @@ todos = [ ext, p = children(item) @test kind(ext) == K"TimestampExtension" end - @testset "Todos chained with timestamp: ($t)" for (t,res) in todos + @testset "Todos chained with timestamp: ($t)" for (t, res) in todos s = "$(repeat(m, n)) ($t|@ Tuesday) hey" ast = norg(s) nestable = first(children(ast.root)) @@ -78,7 +78,7 @@ todos = [ ts_ext = last(children(ext)) @test kind(ts_ext) == K"TimestampExtension" end - @testset "Todos chained with due date: ($t)" for (t,res) in todos + @testset "Todos chained with due date: ($t)" for (t, res) in todos s = "$(repeat(m, n)) ($t|< Tuesday) hey" ast = norg(s) nestable = first(children(ast.root)) @@ -92,7 +92,7 @@ todos = [ ts_ext = last(children(ext)) @test kind(ts_ext) == K"DueDateExtension" end - @testset "Todos chained with start date: ($t)" for (t,res) in todos + @testset "Todos chained with start date: ($t)" for (t, res) in todos s = "$(repeat(m, n)) ($t|> Tuesday) hey" ast = norg(s) nestable = first(children(ast.root)) @@ -106,7 +106,7 @@ todos = [ ts_ext = last(children(ext)) @test kind(ts_ext) == K"StartDateExtension" end - @testset "Todos chained with priority: ($t)" for (t,res) in todos + @testset "Todos chained with priority: ($t)" for (t, res) in todos s = "$(repeat(m, n)) ($t|# A) hey" ast = norg(s) nestable = first(children(ast.root)) @@ -122,4 +122,3 @@ todos = [ end end end - diff --git a/test/ast_tests/test_detached_modifier_suffix.jl b/test/ast_tests/test_detached_modifier_suffix.jl index 552a447..d541062 100644 --- a/test/ast_tests/test_detached_modifier_suffix.jl +++ b/test/ast_tests/test_detached_modifier_suffix.jl @@ -3,39 +3,65 @@ AST = Norg.AST textify = Norg.Codegen.textify slide_children = [ - (K"Definition", """\$ Single definition - Hello world""") - (K"Definition", """\$\$ Longer definition - Hello - It's me - \$\$""") - (K"Definition", """\$ Grouped definition - hey - \$ Another one - ho""") - (K"Footnote", """^ Single footnote - Hello world""") - (K"Footnote", """^^ Longer footnote - Hello - It's me - ^^""") - (K"Footnote", """^ Grouped footnote - hey - ^ Another one - ho""") - (K"Verbatim", """@verb foo - This is some very cody code. - @end""") - (K"Paragraph", """I'm a simple paragraph. - Pretty unimpressive eh?""") + ( + K"Definition", + """\$ Single definition +Hello world""", + ) + ( + K"Definition", + """\$\$ Longer definition +Hello +It's me +\$\$""", + ) + ( + K"Definition", + """\$ Grouped definition +hey +\$ Another one +ho""", + ) + ( + K"Footnote", + """^ Single footnote +Hello world""", + ) + ( + K"Footnote", + """^^ Longer footnote +Hello +It's me +^^""", + ) + ( + K"Footnote", + """^ Grouped footnote +hey +^ Another one +ho""", + ) + ( + K"Verbatim", + """@verb foo +This is some very cody code. +@end""", + ) + ( + K"Paragraph", + """I'm a simple paragraph. 
+Pretty unimpressive eh?""", + ) ] -nestable = [('-', K"UnorderedList1") - ('~', K"OrderedList1") - ('>', K"Quote1")] +nestable = [ + ('-', K"UnorderedList1") + ('~', K"OrderedList1") + ('>', K"Quote1") +] @testset "Slide can have $(child_T) children" for (child_T, child_text) in slide_children - for (m,nestable_T) in nestable + for (m, nestable_T) in nestable s = """$m First line $m : $(child_text) @@ -43,7 +69,7 @@ nestable = [('-', K"UnorderedList1") ast = norg(s) nest = first(children(ast.root)) @test kind(nest) == nestable_T - i1,i2,i3 = children(nest) + i1, i2, i3 = children(nest) @test kind(i1) == K"NestableItem" @test kind(i2) == K"NestableItem" @test kind(i3) == K"NestableItem" @@ -72,7 +98,6 @@ end @test kind(p2) == K"Paragraph" @test textify(ast, p1) == "This is a paragraph." @test textify(ast, p2) == "This is another paragraph inside of the same list item." - end @testset "Delimiter precendence in indent segment" begin ast = norg"""* Heading @@ -92,7 +117,7 @@ end @test kind(li) == K"NestableItem" is = first(children(li)) @test kind(is) == K"IndentSegment" - p,wd = children(is) + p, wd = children(is) @test kind(wd) == K"WeakDelimitingModifier" @test kind(p) == K"Paragraph" @test textify(ast, is) == "Text" @@ -113,21 +138,21 @@ end This is not a part of any indent segment. """ - ul,sd,p = children(ast.root) + ul, sd, p = children(ast.root) @test kind(ul) == K"UnorderedList1" @test kind(sd) == K"StrongDelimitingModifier" @test kind(p) == K"Paragraph" @test textify(ast, p) == "This is not a part of any indent segment." is = first(children(first(children(ul)))) @test kind(is) == K"IndentSegment" - p1,p2,ul = children(is) + p1, p2, ul = children(is) @test kind(p1) == K"Paragraph" @test kind(p2) == K"Paragraph" @test textify(ast, p1) == "This is an indent segment." @test textify(ast, p2) == "This paragraph should also belong to the indent segment." @test kind(ul) == K"UnorderedList2" is = first(children(first(children(ul)))) - p,verb = children(is) + p, verb = children(is) @test kind(p) == K"Paragraph" @test kind(verb) == K"Verbatim" end diff --git a/test/ast_tests/test_headings.jl b/test/ast_tests/test_headings.jl index 18b9ea7..1513cba 100644 --- a/test/ast_tests/test_headings.jl +++ b/test/ast_tests/test_headings.jl @@ -11,7 +11,7 @@ Node = Norg.AST.Node ast = norg(s) - h1,sd,p = children(ast.root) + h1, sd, p = children(ast.root) @test kind(h1) == K"Heading1" @test kind(p) == K"Paragraph" @@ -49,7 +49,7 @@ And here is some more text that has broken out of the matrix. h1 = children(ast.root)[2] p = last(children(ast.root)) - @test kind(hi) == AST.heading_level(i) + @test kind(hi) == AST.heading_kind(i) @test kind(h1) == K"Heading1" @test kind(p) == K"Paragraph" @@ -59,7 +59,7 @@ And here is some more text that has broken out of the matrix. 
hj = children(h1)[2] for j in 2:i - @test kind(hj) == AST.heading_level(j) + @test kind(hj) == AST.heading_kind(j) hj_title = first(children(hj)) @test kind(hj_title) == K"ParagraphSegment" @test length(children(hj_title)) == 9 @@ -164,7 +164,7 @@ end $m$m$m There """) - p1,delim,p2 = children(ast.root) + p1, delim, p2 = children(ast.root) @test Norg.Codegen.textify(ast, p1) == "Hello" @test Norg.Codegen.textify(ast, p2) == "There" end diff --git a/test/ast_tests/test_links.jl b/test/ast_tests/test_links.jl index dc57500..2500a7a 100644 --- a/test/ast_tests/test_links.jl +++ b/test/ast_tests/test_links.jl @@ -1,21 +1,22 @@ Node = Norg.AST.Node AST = Norg.AST -simple_link_tests = [":norg_file:" => K"NorgFileLocation" - "* heading" => K"DetachedModifierLocation" - "** heading" => K"DetachedModifierLocation" - "*** heading" => K"DetachedModifierLocation" - "**** heading" => K"DetachedModifierLocation" - "***** heading" => K"DetachedModifierLocation" - "****** heading" => K"DetachedModifierLocation" - "******* heading" => K"DetachedModifierLocation" - "# magic" => K"MagicLocation" - "42" => K"LineNumberLocation" - "https://example.org" => K"URLLocation" - "file://example.txt" => K"URLLocation" - "/ example.txt" => K"FileLocation" - "? test" => K"WikiLocation" - "@ Wednesday" => K"TimestampLocation" +simple_link_tests = [ + ":norg_file:" => K"NorgFileLocation" + "* heading" => K"DetachedModifierLocation" + "** heading" => K"DetachedModifierLocation" + "*** heading" => K"DetachedModifierLocation" + "**** heading" => K"DetachedModifierLocation" + "***** heading" => K"DetachedModifierLocation" + "****** heading" => K"DetachedModifierLocation" + "******* heading" => K"DetachedModifierLocation" + "# magic" => K"MagicLocation" + "42" => K"LineNumberLocation" + "https://example.org" => K"URLLocation" + "file://example.txt" => K"URLLocation" + "/ example.txt" => K"FileLocation" + "? 
test" => K"WikiLocation" + "@ Wednesday" => K"TimestampLocation" ] @testset "basic links: $target" for (link, target) in simple_link_tests @@ -28,7 +29,7 @@ simple_link_tests = [":norg_file:" => K"NorgFileLocation" @test kind(l) == K"Link" @test kind(loc) == target @test kind(space) == K"WordNode" - @test join(value.(ast.tokens[space.start:space.stop])) == " " + @test join(value.(ast.tokens[(space.start):(space.stop)])) == " " end @testset "basic links with description: $target" for (link, target) in simple_link_tests @@ -44,10 +45,11 @@ end @test kind(descr) == K"LinkDescription" descr_ps = first(children(descr)) descr_word = first(children(descr_ps)) - @test join(value.(ast.tokens[descr_word.start:descr_word.stop])) == "descr" + @test join(value.(ast.tokens[(descr_word.start):(descr_word.stop)])) == "descr" end -@testset "Checking markup in link description :$link => $target" for (link, target) in simple_link_tests +@testset "Checking markup in link description :$link => $target" for (link, target) in + simple_link_tests s = "{$link}[*descr*]" ast = norg(s) p = first(children(ast.root)) @@ -83,17 +85,18 @@ end @test kind(target) == K"FileNorgRootTarget" end -subtarget_tests = [":file:1" => K"LineNumberLocation" - ":file:* heading" => K"DetachedModifierLocation" - ":file:** heading" => K"DetachedModifierLocation" - ":file:*** heading" => K"DetachedModifierLocation" - ":file:**** heading" => K"DetachedModifierLocation" - ":file:***** heading" => K"DetachedModifierLocation" - ":file:****** heading" => K"DetachedModifierLocation" - ":file:******* heading" => K"DetachedModifierLocation" - ":file:# magic" => K"MagicLocation" - "/ file.txt:1" => K"LineNumberLocation" - "? test:file:" => K"NorgFileLocation" +subtarget_tests = [ + ":file:1" => K"LineNumberLocation" + ":file:* heading" => K"DetachedModifierLocation" + ":file:** heading" => K"DetachedModifierLocation" + ":file:*** heading" => K"DetachedModifierLocation" + ":file:**** heading" => K"DetachedModifierLocation" + ":file:***** heading" => K"DetachedModifierLocation" + ":file:****** heading" => K"DetachedModifierLocation" + ":file:******* heading" => K"DetachedModifierLocation" + ":file:# magic" => K"MagicLocation" + "/ file.txt:1" => K"LineNumberLocation" + "? 
test:file:" => K"NorgFileLocation" ] @testset "Checking subtarget :$link => $target" for (link, target) in subtarget_tests s = "{$link}" @@ -111,9 +114,9 @@ leaves_tests = [ "** heading" => [K"Heading2", K"WordNode"] "*** heading" => [K"Heading3", K"WordNode"] "**** heading" => [K"Heading4", K"WordNode"] - "***** heading" =>[K"Heading5", K"WordNode"] - "****** heading" =>[K"Heading6", K"WordNode"] - "******* heading" =>[K"Heading6", K"WordNode"] + "***** heading" => [K"Heading5", K"WordNode"] + "****** heading" => [K"Heading6", K"WordNode"] + "******* heading" => [K"Heading6", K"WordNode"] "# magic" => [K"WordNode"] "42" => [K"LineNumberTarget"] "https://example.org" => [K"URLTarget"] @@ -124,7 +127,7 @@ leaves_tests = [ @testset "Checking leaves :$link => $target" for (link, target) in leaves_tests s = "{$link}" ast = norg(s) - for (l,t) in zip(collect(Leaves(ast.root)), target) + for (l, t) in zip(collect(Leaves(ast.root)), target) @test kind(l) == t end end @@ -148,53 +151,85 @@ end @test kind(last(children(anchor2))) == K"URLLocation" end -anchor_tests = [(input = "[heading 1 anchor]\n\n[heading 1 anchor]{* Heading 1}", -target = K"DetachedModifierLocation") - (input = "[heading 2 anchor]\n\n[heading 2 anchor]{** Heading 2}", - target = K"DetachedModifierLocation") - (input = "[heading 3 anchor]\n\n[heading 3 anchor]{*** Heading 3}", - target = K"DetachedModifierLocation") - (input = "[heading 4 anchor]\n\n[heading 4 anchor]{**** Heading 4}", - target = K"DetachedModifierLocation") - (input = "[heading 5 anchor]\n\n[heading 5 anchor]{***** Heading 5}", - target = K"DetachedModifierLocation") - (input = "[heading 6 anchor]\n\n[heading 6 anchor]{****** Heading 6}", - target = K"DetachedModifierLocation") - (input = "[heading 7 anchor]\n\n[heading 7 anchor]{******* Heading 7}", - target = K"DetachedModifierLocation") - (input = "[generic anchor]\n\n[generic anchor]{# Generic}", - target = K"MagicLocation") - (input = "[norg file anchor]\n\n[norg file anchor]{:norg_file:}", - target = K"NorgFileLocation") - (input = "[external heading 1 anchor]\n\n[external heading 1 anchor]{:norg_file:* Heading 1}", - target = K"NorgFileLocation", - subtarget = K"DetachedModifierLocation") - (input = "[external heading 2 anchor]\n\n[external heading 2 anchor]{:norg_file:** Heading 2}", - target = K"NorgFileLocation", - subtarget = K"DetachedModifierLocation") - (input = "[external heading 3 anchor]\n\n[external heading 3 anchor]{:norg_file:*** Heading 3}", - target = K"NorgFileLocation", - subtarget = K"DetachedModifierLocation") - (input = "[external heading 4 anchor]\n\n[external heading 4 anchor]{:norg_file:**** Heading 4}", - target = K"NorgFileLocation", - subtarget = K"DetachedModifierLocation") - (input = "[external heading 5 anchor]\n\n[external heading 5 anchor]{:norg_file:***** Heading 5}", - target = K"NorgFileLocation", - subtarget = K"DetachedModifierLocation") - (input = "[external heading 7 anchor]\n\n[external heading 6 anchor]{:norg_file:****** Heading 6}", - target = K"NorgFileLocation", - subtarget = K"DetachedModifierLocation") - (input = "[external generic anchor]\n\n[external generic anchor]{:norg_file:# Generic}", - target = K"NorgFileLocation", - subtarget = K"MagicLocation") - (input = "[non-norg file anchor]\n\n[non-norg file anchor]{/ external_file.txt}", - target = K"FileLocation") - (input = "[url anchor]\n\n[url anchor]{https://github.com/}", - target = K"URLLocation") - (input = "[file anchor]\n\n[file anchor]{file:///dev/null}", - target = K"URLLocation") - (input = 
"[timestamp anchor]\n\n[timestamp anchor]{@ Wednesday}", - target = K"TimestampLocation") +anchor_tests = [ + ( + input="[heading 1 anchor]\n\n[heading 1 anchor]{* Heading 1}", + target=K"DetachedModifierLocation", + ) + ( + input="[heading 2 anchor]\n\n[heading 2 anchor]{** Heading 2}", + target=K"DetachedModifierLocation", + ) + ( + input="[heading 3 anchor]\n\n[heading 3 anchor]{*** Heading 3}", + target=K"DetachedModifierLocation", + ) + ( + input="[heading 4 anchor]\n\n[heading 4 anchor]{**** Heading 4}", + target=K"DetachedModifierLocation", + ) + ( + input="[heading 5 anchor]\n\n[heading 5 anchor]{***** Heading 5}", + target=K"DetachedModifierLocation", + ) + ( + input="[heading 6 anchor]\n\n[heading 6 anchor]{****** Heading 6}", + target=K"DetachedModifierLocation", + ) + ( + input="[heading 7 anchor]\n\n[heading 7 anchor]{******* Heading 7}", + target=K"DetachedModifierLocation", + ) + (input="[generic anchor]\n\n[generic anchor]{# Generic}", target=K"MagicLocation") + ( + input="[norg file anchor]\n\n[norg file anchor]{:norg_file:}", + target=K"NorgFileLocation", + ) + ( + input="[external heading 1 anchor]\n\n[external heading 1 anchor]{:norg_file:* Heading 1}", + target=K"NorgFileLocation", + subtarget=K"DetachedModifierLocation", + ) + ( + input="[external heading 2 anchor]\n\n[external heading 2 anchor]{:norg_file:** Heading 2}", + target=K"NorgFileLocation", + subtarget=K"DetachedModifierLocation", + ) + ( + input="[external heading 3 anchor]\n\n[external heading 3 anchor]{:norg_file:*** Heading 3}", + target=K"NorgFileLocation", + subtarget=K"DetachedModifierLocation", + ) + ( + input="[external heading 4 anchor]\n\n[external heading 4 anchor]{:norg_file:**** Heading 4}", + target=K"NorgFileLocation", + subtarget=K"DetachedModifierLocation", + ) + ( + input="[external heading 5 anchor]\n\n[external heading 5 anchor]{:norg_file:***** Heading 5}", + target=K"NorgFileLocation", + subtarget=K"DetachedModifierLocation", + ) + ( + input="[external heading 7 anchor]\n\n[external heading 6 anchor]{:norg_file:****** Heading 6}", + target=K"NorgFileLocation", + subtarget=K"DetachedModifierLocation", + ) + ( + input="[external generic anchor]\n\n[external generic anchor]{:norg_file:# Generic}", + target=K"NorgFileLocation", + subtarget=K"MagicLocation", + ) + ( + input="[non-norg file anchor]\n\n[non-norg file anchor]{/ external_file.txt}", + target=K"FileLocation", + ) + (input="[url anchor]\n\n[url anchor]{https://github.com/}", target=K"URLLocation") + (input="[file anchor]\n\n[file anchor]{file:///dev/null}", target=K"URLLocation") + ( + input="[timestamp anchor]\n\n[timestamp anchor]{@ Wednesday}", + target=K"TimestampLocation", + ) ] @testset "Testing anchor : $(t.target)" for t in anchor_tests @@ -223,109 +258,106 @@ end end @testset "Endlines in linkables." 
begin + @testset "Invalid endlines for target $(repr(k))" for (k, _) in simple_link_tests + invalid_singles = [ + """this is not a { + $k} + """ + """nor is this a [linkable + ] + """ + """{ + $k}""" + """{ + $k + }""" + """{$k + }""" + """{ $k}""" + ] + @testset "Invalid examples : $(repr(s))" for s in invalid_singles + ast = norg(s) + @test !any(kind(n) == K"Link" for n in collect(PreOrderDFS(ast))) + end + invalid_complexes = [ + """{$k}[invalid + ]""" + """[invalide]{$k + }""" + """{$k}[ + text + ]""" + """{$k}[text + ]""" + """{$k}[ + text]""" + ] + @testset "Invalid examples : $(repr(s))" for s in invalid_complexes + ast = norg(s) + p = first(children(ast.root)) + ps1, ps2 = children(p) + @test kind(ps1) == K"ParagraphSegment" + @test kind(ps2) == K"ParagraphSegment" + l, ws... = children(ps1) + @test kind(l) == K"Link" || kind(l) == K"Anchor" + @test all(kind.(ws) .== Ref(K"WordNode")) + end -@testset "Invalid endlines for target $(repr(k))" for (k,_) in simple_link_tests - invalid_singles = [ - """this is not a { - $k} - """ - """nor is this a [linkable - ] - """ - """{ - $k}""" - """{ - $k - }""" - """{$k - }""" - """{ $k}""" - ] -@testset "Invalid examples : $(repr(s))" for s in invalid_singles - ast = norg(s) - @test !any(kind(n) == K"Link" for n in collect(PreOrderDFS(ast))) -end - invalid_complexes = [ - """{$k}[invalid - ]""" - """[invalide]{$k - }""" - """{$k}[ - text - ]""" - """{$k}[text - ]""" - """{$k}[ - text]""" - ] -@testset "Invalid examples : $(repr(s))" for s in invalid_complexes - ast = norg(s) - p = first(children(ast.root)) - ps1, ps2 = children(p) - @test kind(ps1) == K"ParagraphSegment" - @test kind(ps2) == K"ParagraphSegment" - l, ws... = children(ps1) - @test kind(l) == K"Link" || kind(l) == K"Anchor" - @test all(kind.(ws) .== Ref(K"WordNode")) -end + invalid_inlines = [ + """< + hi>""" + """""" + ] + @testset "Invalid examples : $(repr(s))" for s in invalid_inlines + ast = norg(s) + @test !any(kind(n) == K"InlineLinkTarget" for n in collect(PreOrderDFS(ast))) + end + end - invalid_inlines = [ - """< - hi>""" - """""" - ] -@testset "Invalid examples : $(repr(s))" for s in invalid_inlines - ast = norg(s) - @test !any(kind(n) == K"InlineLinkTarget" for n in collect(PreOrderDFS(ast))) -end -end + @testset "Valid endlines" begin + valid_singles = [ + "{* some\ntext }" + "{# link\n text}" + "{* a link\nto a heading}" + ] + @testset "Valid examples : $(repr(s))" for s in valid_singles + ast = norg(s) + p = first(children(ast.root)) + ps = first(children(p)) + @test kind(first(children(ps))) == K"Link" + end + valid_complexes = [ + "{/ ~\n myfile.txt}[the `~` character is /not/ treated as a trailing modifier]" + "{* a\n link to a heading}[with\n a description]" + ] + @testset "Valid examples : $(repr(s))" for s in valid_complexes + ast = norg(s) + p = first(children(ast.root)) + ps = first(children(p)) + l = first(children(ps)) + @test kind(l) == K"Link" + loc, descr = children(l) + @test AST.is_link_location(loc) + @test kind(descr) == K"LinkDescription" + end + s = "[te\n xt]{# linkable}" + ast = norg(s) + p = first(children(ast.root)) + ps = first(children(p)) + a = first(children(ps)) + @test kind(a) == K"Anchor" + descr, loc = children(a) + @test AST.is_link_location(loc) + @test kind(descr) == K"LinkDescription" -@testset "Valid endlines" begin - valid_singles = [ - "{* some\ntext }" - "{# link\n text}" - "{* a link\nto a heading}" - ] -@testset "Valid examples : $(repr(s))" for s in valid_singles - ast = norg(s) - p = first(children(ast.root)) - ps = 
first(children(p)) - @test kind(first(children(ps))) == K"Link" -end - valid_complexes = [ - "{/ ~\n myfile.txt}[the `~` character is /not/ treated as a trailing modifier]" - "{* a\n link to a heading}[with\n a description]" - ] -@testset "Valid examples : $(repr(s))" for s in valid_complexes - ast = norg(s) - p = first(children(ast.root)) - ps = first(children(p)) - l = first(children(ps)) - @test kind(l) == K"Link" - loc, descr = children(l) - @test AST.is_link_location(loc) - @test kind(descr) == K"LinkDescription" -end - s = "[te\n xt]{# linkable}" - ast = norg(s) - p = first(children(ast.root)) - ps = first(children(p)) - a = first(children(ps)) - @test kind(a) == K"Anchor" - descr,loc = children(a) - @test AST.is_link_location(loc) - @test kind(descr) == K"LinkDescription" - - valid_inlines = [ - """""" - ] -@testset "Valid examples : $(repr(s))" for s in valid_inlines - ast = norg(s) - p = first(children(ast.root)) - ps = first(children(p)) - @test kind(first(children(ps))) == K"InlineLinkTarget" -end -end + valid_inlines = [""""""] + @testset "Valid examples : $(repr(s))" for s in valid_inlines + ast = norg(s) + p = first(children(ast.root)) + ps = first(children(p)) + @test kind(first(children(ps))) == K"InlineLinkTarget" + end + end end diff --git a/test/ast_tests/test_markup.jl b/test/ast_tests/test_markup.jl index 1e88890..f06d724 100644 --- a/test/ast_tests/test_markup.jl +++ b/test/ast_tests/test_markup.jl @@ -5,20 +5,20 @@ AST = Norg.AST textify = Norg.Codegen.textify simple_markups = [ -("*", K"Bold"), -("/", K"Italic") , -("_", K"Underline"), -("-", K"Strikethrough"), -("!", K"Spoiler"), -("^", K"Superscript"), -(",", K"Subscript"), -("`", K"InlineCode"), -("%", K"NullModifier"), -("\$", K"InlineMath"), -("&", K"Variable") + ("*", K"Bold"), + ("/", K"Italic"), + ("_", K"Underline"), + ("-", K"Strikethrough"), + ("!", K"Spoiler"), + ("^", K"Superscript"), + (",", K"Subscript"), + ("`", K"InlineCode"), + ("%", K"NullModifier"), + ("\$", K"InlineMath"), + ("&", K"Variable"), ] -@testset "Standalone markup for $m" for (m,k) in simple_markups +@testset "Standalone markup for $m" for (m, k) in simple_markups ast = norg("$(m)inner$(m)") @test ast isa Norg.AST.NorgDocument p = first(children(ast.root)) @@ -31,7 +31,7 @@ simple_markups = [ @test kind(ps) == K"ParagraphSegment" w = first(children(ps)) @test kind(w) == K"WordNode" - @test join(Norg.Tokens.value.(ast.tokens[w.start:w.stop])) == "inner" + @test join(Norg.Tokens.value.(ast.tokens[(w.start):(w.stop)])) == "inner" end @testset "Markup inside a sentence for $m" for (m, k) in simple_markups @@ -47,7 +47,7 @@ end @test kind(ps) == K"ParagraphSegment" w = first(children(ps)) @test kind(w) == K"WordNode" - @test join(Norg.Tokens.value.(ast.tokens[w.start:w.stop])) == "inner" + @test join(Norg.Tokens.value.(ast.tokens[(w.start):(w.stop)])) == "inner" end simple_nested_outer = [ @@ -62,7 +62,8 @@ simple_nested_outer = [ ] @testset "Nested markup $n inside $m" for (m, T) in simple_nested_outer, - (n, U) in simple_markups + (n, U) in simple_markups + if m == n continue end @@ -78,16 +79,12 @@ simple_nested_outer = [ @test kind(ps) == K"ParagraphSegment" w = first(children(ps)) @test kind(w) == K"WordNode" - @test join(Norg.Tokens.value.(ast.tokens[w.start:w.stop])) == "inner" + @test join(Norg.Tokens.value.(ast.tokens[(w.start):(w.stop)])) == "inner" end -verbatim_nested = [ - ("`", K"InlineCode"), - ("\$", K"InlineMath"), - ("&", K"Variable") -] +verbatim_nested = [("`", K"InlineCode"), ("\$", K"InlineMath"), ("&", K"Variable")] 
-@testset "Verbatim markup nesting test: $V" for (v,V) in verbatim_nested +@testset "Verbatim markup nesting test: $V" for (v, V) in verbatim_nested @testset "Nested markup $T inside $V" for (m, T) in simple_markups if occursin(m, "`\$&") continue @@ -144,7 +141,7 @@ end s = "*/Bold and italic*/" ast = norg(s) ps = first(children(first(children(ast.root)))) - b,w = children(ps) + b, w = children(ps) @test kind(b) == K"Bold" @test kind(w) == K"WordNode" ps = first(children(b)) @@ -196,10 +193,10 @@ end @test kind(ic2) == K"InlineCode" end -@testset "Link modifier for: $T" for (m,T) in simple_markups +@testset "Link modifier for: $T" for (m, T) in simple_markups ast = norg("Intra:$(m)word$(m):markup") ps = first(children(first(children(ast.root)))) - w1,mark,w2 = children(ps) + w1, mark, w2 = children(ps) @test kind(w1) == K"WordNode" @test kind(mark) == T @test kind(w2) == K"WordNode" @@ -208,19 +205,18 @@ end @test textify(ast, w2) == "markup" end - simple_freeformmarkups = [ -("*", K"FreeFormBold"), -("/", K"FreeFormItalic") , -("_", K"FreeFormUnderline"), -("-", K"FreeFormStrikethrough"), -("!", K"FreeFormSpoiler"), -("^", K"FreeFormSuperscript"), -(",", K"FreeFormSubscript"), -("`", K"FreeFormInlineCode"), -("%", K"FreeFormNullModifier"), -("\$", K"FreeFormInlineMath"), -("&", K"FreeFormVariable") + ("*", K"FreeFormBold"), + ("/", K"FreeFormItalic"), + ("_", K"FreeFormUnderline"), + ("-", K"FreeFormStrikethrough"), + ("!", K"FreeFormSpoiler"), + ("^", K"FreeFormSuperscript"), + (",", K"FreeFormSubscript"), + ("`", K"FreeFormInlineCode"), + ("%", K"FreeFormNullModifier"), + ("\$", K"FreeFormInlineMath"), + ("&", K"FreeFormVariable"), ] freeform_templates = [ @@ -230,7 +226,7 @@ freeform_templates = [ " inner " ] -@testset "Standalone markup for $k" for (m,k) in simple_freeformmarkups +@testset "Standalone markup for $k" for (m, k) in simple_freeformmarkups for s in freeform_templates ast = norg("$(m)|$s|$(m)") @test ast isa Norg.AST.NorgDocument @@ -248,12 +244,10 @@ freeform_templates = [ end verbatim_nested = [ - ("`", K"FreeFormInlineCode"), - ("\$", K"FreeFormInlineMath"), - ("&", K"FreeFormVariable") + ("`", K"FreeFormInlineCode"), ("\$", K"FreeFormInlineMath"), ("&", K"FreeFormVariable") ] -@testset "Verbatim markup nesting test: $V" for (v,V) in verbatim_nested +@testset "Verbatim markup nesting test: $V" for (v, V) in verbatim_nested @testset "Nested markup $T inside $V" for (m, T) in simple_markups if occursin(m, "`\$&") continue diff --git a/test/ast_tests/test_nestable_detached_modifiers.jl b/test/ast_tests/test_nestable_detached_modifiers.jl index 476f8e6..f01c9c7 100644 --- a/test/ast_tests/test_nestable_detached_modifiers.jl +++ b/test/ast_tests/test_nestable_detached_modifiers.jl @@ -2,9 +2,11 @@ Node = Norg.AST.Node AST = Norg.AST -nestable = [('-', K"UnorderedList1") - ('~', K"OrderedList1") - ('>', K"Quote1")] +nestable = [ + ('-', K"UnorderedList1") + ('~', K"OrderedList1") + ('>', K"Quote1") +] @testset "$T should be grouping." for (m, T) in nestable s = """$m first item @@ -18,7 +20,8 @@ nestable = [('-', K"UnorderedList1") @test kind(item2) == K"NestableItem" end -@testset "$T grouping should not happen when there is a paragraph break." for (m, T) in nestable +@testset "$T grouping should not happen when there is a paragraph break." 
for (m, T) in + nestable s = """$m first item $m second item @@ -33,9 +36,11 @@ end @test kind(item2) == K"NestableItem" end -nestable_check = [('-', AST.is_unordered_list) - ('~', AST.is_ordered_list) - ('>', AST.is_quote)] +nestable_check = [ + ('-', AST.is_unordered_list) + ('~', AST.is_ordered_list) + ('>', AST.is_quote) +] @testset "$m should be nestable." for (m, verif) in nestable_check s = """$m item1 diff --git a/test/ast_tests/test_paragraphs.jl b/test/ast_tests/test_paragraphs.jl index 1e1b1a2..eae82b9 100644 --- a/test/ast_tests/test_paragraphs.jl +++ b/test/ast_tests/test_paragraphs.jl @@ -1,8 +1,7 @@ Node = Norg.AST.Node @testset "Two newlines should separate two paragraphs." begin - ast = norg( - "Hi I am first paragraph.\n\nOh, hello there, I am second paragraph !") + ast = norg("Hi I am first paragraph.\n\nOh, hello there, I am second paragraph !") p1, p2 = children(ast.root) @test kind(p1) == K"Paragraph" @test kind(p2) == K"Paragraph" @@ -10,9 +9,10 @@ end @testset "One newline should separate two paragraph segments." begin ast = norg( - "Hi I am first paragraph segment...\nAnd I am second paragraph segment !\n\nOh, hello there, I am second paragraph !") + "Hi I am first paragraph segment...\nAnd I am second paragraph segment !\n\nOh, hello there, I am second paragraph !", + ) p1, p2 = children(ast.root) - ps1,ps2 = children(p1) + ps1, ps2 = children(p1) ps3 = first(children(p2)) @test kind(p1) == K"Paragraph" @test kind(p2) == K"Paragraph" diff --git a/test/ast_tests/test_rangeable_detached_modifiers.jl b/test/ast_tests/test_rangeable_detached_modifiers.jl index 4eee4ec..82fe86d 100644 --- a/test/ast_tests/test_rangeable_detached_modifiers.jl +++ b/test/ast_tests/test_rangeable_detached_modifiers.jl @@ -14,13 +14,13 @@ rangeable = [ outside """ ast = norg(s) - rang,p = children(ast.root) + rang, p = children(ast.root) @test kind(rang) == T @test kind(p) == K"Paragraph" @test textify(ast, p) == "outside" item = first(children(rang)) @test kind(item) == K"RangeableItem" - title,content = children(item) + title, content = children(item) @test kind(title) == K"ParagraphSegment" @test kind(content) == K"Paragraph" @test textify(ast, title) == "title" @@ -36,13 +36,13 @@ end outside """ ast = norg(s) - rang,p = children(ast.root) + rang, p = children(ast.root) @test kind(rang) == T @test kind(p) == K"Paragraph" @test textify(ast, p) == "outside" item = first(children(rang)) @test kind(item) == K"RangeableItem" - title,content1,content2 = children(item) + title, content1, content2 = children(item) @test kind(title) == K"ParagraphSegment" @test kind(content1) == K"Paragraph" @test kind(content2) == K"Paragraph" @@ -60,13 +60,13 @@ end """ ast = norg(s) t = first(children(ast.root)) - _,rang,p = children(t) + _, rang, p = children(t) @test kind(rang) == T @test kind(p) == K"Paragraph" @test textify(ast, p) == "outside" item = first(children(rang)) @test kind(item) == K"RangeableItem" - title,content = children(item) + title, content = children(item) @test kind(title) == K"ParagraphSegment" @test kind(content) == K"Paragraph" @test textify(ast, title) == "title" @@ -84,13 +84,13 @@ end """ ast = norg(s) t = first(children(ast.root)) - _,rang,p = children(t) + _, rang, p = children(t) @test kind(rang) == T @test kind(p) == K"Paragraph" @test textify(ast, p) == "outside" item = first(children(rang)) @test kind(item) == K"RangeableItem" - title,content1,content2 = children(item) + title, content1, content2 = children(item) @test kind(title) == K"ParagraphSegment" @test 
kind(content1) == K"Paragraph" @test kind(content2) == K"Paragraph" @@ -106,13 +106,13 @@ end outside """ ast = norg(s) - rang,p = children(ast.root) + rang, p = children(ast.root) @test kind(rang) == T @test kind(p) == K"Paragraph" @test textify(ast, p) == "outside" item = first(children(rang)) @test kind(item) == K"RangeableItem" - title,content = children(item) + title, content = children(item) @test kind(title) == K"ParagraphSegment" @test kind(content) == K"Paragraph" @test textify(ast, title) == "title" @@ -128,13 +128,13 @@ end outside """ ast = norg(s) - rang,p = children(ast.root) + rang, p = children(ast.root) @test kind(rang) == T @test kind(p) == K"Paragraph" @test textify(ast, p) == "outside" item = first(children(rang)) @test kind(item) == K"RangeableItem" - title,content1,content2 = children(item) + title, content1, content2 = children(item) @test kind(title) == K"ParagraphSegment" @test kind(content1) == K"Paragraph" @test kind(content2) == K"Paragraph" @@ -144,27 +144,28 @@ end end @testset "Rangeables must be grouping: $T" for (m, T) in rangeable - make_str(str_kind, label) = if str_kind=="simple" - """$m title$(label) - content$(label) - """ - else - """$m$m title$(label) - content$(label) - $m$m - """ - end + make_str(str_kind, label) = + if str_kind == "simple" + """$m title$(label) + content$(label) + """ + else + """$m$m title$(label) + content$(label) + $m$m + """ + end for a in ("simple", "matched") s_a = make_str(a, "a") for b in ("simple", "matched") s_b = make_str(b, "b") for c in ("simple", "matched") s_c = make_str(c, "c") - s = s_a*s_b*s_c + s = s_a * s_b * s_c ast = norg(s) rang = first(children(ast.root)) @test kind(rang) == T - for (l,item) in zip(["a", "b", "c"], children(rang)) + for (l, item) in zip(["a", "b", "c"], children(rang)) @test kind(item) === K"RangeableItem" title, content = children(item) @test textify(ast, title) == "title$l" diff --git a/test/ast_tests/test_tags.jl b/test/ast_tests/test_tags.jl index 3ec9af5..1d7cd3d 100644 --- a/test/ast_tests/test_tags.jl +++ b/test/ast_tests/test_tags.jl @@ -139,38 +139,34 @@ tagtypes = [ end @testset "Weak carryover tag applies to the right elements." begin -@testset "Paragraghs and paragraph segments." begin - ast = norg""" - +test - Applied here. - Not applied here. - """ - p = first(children(ast.root)) - t,ps = children(p) - @test kind(t) == K"WeakCarryoverTag" - @test kind(ps) == K"ParagraphSegment" - label, ps = children(t) - @test Norg.Codegen.textify(ast, label) == "test" - @test Norg.Codegen.textify(ast, ps) == "Applied here." - ast = norg""" - Not applied here. - +test - Applied here. - """ - p = first(children(ast.root)) - ps,t = children(p) - @test kind(t) == K"WeakCarryoverTag" - @test kind(ps) == K"ParagraphSegment" - label, ps = children(t) - @test Norg.Codegen.textify(ast, label) == "test" - @test Norg.Codegen.textify(ast, ps) == "Applied here." -end -nestables = [ - ("-", K"UnorderedList1"), - ("~", K"OrderedList1"), - (">", K"Quote1") -] -@testset "Nestable modifiers: $m" for (t,m) in nestables + @testset "Paragraghs and paragraph segments." begin + ast = norg""" + +test + Applied here. + Not applied here. + """ + p = first(children(ast.root)) + t, ps = children(p) + @test kind(t) == K"WeakCarryoverTag" + @test kind(ps) == K"ParagraphSegment" + label, ps = children(t) + @test Norg.Codegen.textify(ast, label) == "test" + @test Norg.Codegen.textify(ast, ps) == "Applied here." + ast = norg""" + Not applied here. + +test + Applied here. 
+ """ + p = first(children(ast.root)) + ps, t = children(p) + @test kind(t) == K"WeakCarryoverTag" + @test kind(ps) == K"ParagraphSegment" + label, ps = children(t) + @test Norg.Codegen.textify(ast, label) == "test" + @test Norg.Codegen.textify(ast, ps) == "Applied here." + end + nestables = [("-", K"UnorderedList1"), ("~", K"OrderedList1"), (">", K"Quote1")] + @testset "Nestable modifiers: $m" for (t, m) in nestables s = """ +test $t applied @@ -178,7 +174,7 @@ nestables = [ """ ast = norg(s) nestable = first(children(ast.root)) - tag,item = children(nestable) + tag, item = children(nestable) @test kind(tag) == K"WeakCarryoverTag" @test kind(item) == K"NestableItem" @test Norg.Codegen.textify(ast, item) == "not applied" @@ -192,69 +188,71 @@ nestables = [ """ ast = norg(s) nestable = first(children(ast.root)) - item,tag = children(nestable) + item, tag = children(nestable) @test kind(tag) == K"WeakCarryoverTag" @test kind(item) == K"NestableItem" @test Norg.Codegen.textify(ast, item) == "not applied" label, item = children(tag) @test Norg.Codegen.textify(ast, label) == "test" @test Norg.Codegen.textify(ast, item) == "applied" -end -various = [ - (""" - +test - * Heading - hi there - """, K"Heading1") - (""" - +test - @test - blip - @end - """, K"Verbatim") -] -@testset "Various child kind: $k" for (s,k) in various - ast = norg(s) - tag = first(children(ast.root)) - @test kind(tag) == K"WeakCarryoverTag" - label,child = children(tag) - @test Norg.Codegen.textify(ast, label) == "test" - @test kind(child) == k -end + end + various = [ + ( + """ + +test + * Heading + hi there + """, + K"Heading1", + ) + ( + """ + +test + @test + blip + @end + """, + K"Verbatim", + ) + ] + @testset "Various child kind: $k" for (s, k) in various + ast = norg(s) + tag = first(children(ast.root)) + @test kind(tag) == K"WeakCarryoverTag" + label, child = children(tag) + @test Norg.Codegen.textify(ast, label) == "test" + @test kind(child) == k + end end @testset "Strong carryover tag applies to the right elements." begin -@testset "Paragraghs and paragraph segments." begin - ast = norg""" - #test - Applied here. - Applied here too. - """ - t = first(children(ast.root)) - @test length(children(t)) == 2 - label,p = children(t) - @test kind(t) == K"StrongCarryoverTag" - @test kind(p) == K"Paragraph" - @test Norg.Codegen.textify(ast, label) == "test" - ast = norg""" - Not applied here. - #test - Applied here. - """ - p1,t = children(ast.root) - @test kind(t) == K"StrongCarryoverTag" - @test kind(p1) == K"Paragraph" - label, p2 = children(t) - @test kind(p2) == K"Paragraph" - @test Norg.Codegen.textify(ast, label) == "test" - @test Norg.Codegen.textify(ast, p2) == "Applied here." -end -nestables = [ - ("-", K"UnorderedList1"), - ("~", K"OrderedList1"), - (">", K"Quote1") -] -@testset "Nestable modifiers: $m" for (t,m) in nestables + @testset "Paragraghs and paragraph segments." begin + ast = norg""" + #test + Applied here. + Applied here too. + """ + t = first(children(ast.root)) + @test length(children(t)) == 2 + label, p = children(t) + @test kind(t) == K"StrongCarryoverTag" + @test kind(p) == K"Paragraph" + @test Norg.Codegen.textify(ast, label) == "test" + ast = norg""" + Not applied here. + #test + Applied here. + """ + p1, t = children(ast.root) + @test kind(t) == K"StrongCarryoverTag" + @test kind(p1) == K"Paragraph" + label, p2 = children(t) + @test kind(p2) == K"Paragraph" + @test Norg.Codegen.textify(ast, label) == "test" + @test Norg.Codegen.textify(ast, p2) == "Applied here." 
+ end + nestables = [("-", K"UnorderedList1"), ("~", K"OrderedList1"), (">", K"Quote1")] + @testset "Nestable modifiers: $m" for (t, m) in nestables s = """ #test $t applied @@ -278,7 +276,7 @@ nestables = [ $t applied """ ast = norg(s) - nestable,tag = children(ast.root) + nestable, tag = children(ast.root) @test kind(tag) == K"StrongCarryoverTag" @test kind(nestable) == m @test kind(first(children(nestable))) == K"NestableItem" @@ -288,28 +286,34 @@ nestables = [ @test kind(nestable) == m @test kind(first(children(nestable))) == K"NestableItem" @test Norg.Codegen.textify(ast, nestable) == "applied" -end -various = [ - (""" - #test - * Heading - hi there - """, K"Heading1") - (""" - #test - @test - blip - @end - """, K"Verbatim") -] -@testset "Various child kind: $k" for (s,k) in various - ast = norg(s) - tag = first(children(ast.root)) - @test kind(tag) == K"StrongCarryoverTag" - label,child = children(tag) - @test Norg.Codegen.textify(ast, label) == "test" - @test kind(child) == k -end + end + various = [ + ( + """ + #test + * Heading + hi there + """, + K"Heading1", + ) + ( + """ + #test + @test + blip + @end + """, + K"Verbatim", + ) + ] + @testset "Various child kind: $k" for (s, k) in various + ast = norg(s) + tag = first(children(ast.root)) + @test kind(tag) == K"StrongCarryoverTag" + label, child = children(tag) + @test Norg.Codegen.textify(ast, label) == "test" + @test kind(child) == k + end end standard_children = [ diff --git a/test/code_analysis_tests/test_aqua.jl b/test/code_analysis_tests/test_aqua.jl new file mode 100644 index 0000000..8705217 --- /dev/null +++ b/test/code_analysis_tests/test_aqua.jl @@ -0,0 +1,7 @@ +using Aqua + +@static if VERSION < v"1.9" + Aqua.test_all(Norg, ambiguities=false, project_toml_formatting=false) +else + Aqua.test_all(Norg) +end diff --git a/test/code_analysis_tests/test_jet.jl b/test/code_analysis_tests/test_jet.jl new file mode 100644 index 0000000..6600460 --- /dev/null +++ b/test/code_analysis_tests/test_jet.jl @@ -0,0 +1,25 @@ +using JET, AbstractTrees +@testset "JET.jl -> See https://aviatesk.github.io/JET.jl/stable/jetanalysis/#Errors-kinds-and-how-to-fix-them" begin + payload = open(Norg.NORG_SPEC_PATH, "r") do f + read(f, String) + end + + # Error analysis + + # Parse the entire spec + @test_call ignored_modules = (AbstractTrees, Base) norg(payload) + ast = norg(payload) + # HTML codegen + @test_call ignored_modules = (AbstractTrees, Base) Norg.codegen(HTMLTarget(), ast) + # JSON codegen + @test_call ignored_modules = (AbstractTrees, Base) Norg.codegen(JSONTarget(), ast) + + # Optimization analysis + # Parsing + @test_opt ignored_modules = (AbstractTrees, Base) norg(payload) + # Codegen + @test_opt ignored_modules = (AbstractTrees, Base) Norg.codegen(HTMLTarget(), ast) + @test_opt broken = true ignored_modules = (AbstractTrees, Base) Norg.codegen( + JSONTarget(), ast + ) +end diff --git a/test/codegen_tests/html.jl b/test/codegen_tests/html.jl index d3e569a..a36ca69 100644 --- a/test/codegen_tests/html.jl +++ b/test/codegen_tests/html.jl @@ -1,170 +1,171 @@ @testset "HTML target" begin -using Gumbo - -@testset "Test paragraphs" begin - s = "Hi I am first paragraph.\n\nOh, hello there, I am second paragraph !" - html = norg(HTMLTarget(), s) |> string |> parsehtml - pars = html.root[2][1] - @test tag(pars[1]) == :p - @test tag(pars[2]) == :p -end + using Gumbo + + @testset "Test paragraphs" begin + s = "Hi I am first paragraph.\n\nOh, hello there, I am second paragraph !" 
+ html = parsehtml(string(norg(HTMLTarget(), s))) + pars = html.root[2][1] + @test tag(pars[1]) == :p + @test tag(pars[2]) == :p + end -simple_markups_nodes = [ - ('*', :b), - ('/', :i), - ('_', :ins), - ('-', :del), - ('!', :span), - ('^', :sup), - (',', :sub), - ('`', :code), -] - -simple_markups_class = [ - ('*', nothing), - ('/', nothing), - ('_', nothing), - ('-', nothing), - ('!', "spoiler"), - ('^', nothing), - (',', nothing), - ('`', nothing), -] - -@testset "Test correct markup for $m" for (m, html_node) in simple_markups_nodes - s = "$(m)inner$(m)" - html = norg(HTMLTarget(), s) |> string |> parsehtml - b = html.root[2][1][1][1] - @test tag(b) == html_node -end + simple_markups_nodes = [ + ('*', :b), + ('/', :i), + ('_', :ins), + ('-', :del), + ('!', :span), + ('^', :sup), + (',', :sub), + ('`', :code), + ] + + simple_markups_class = [ + ('*', nothing), + ('/', nothing), + ('_', nothing), + ('-', nothing), + ('!', "spoiler"), + ('^', nothing), + (',', nothing), + ('`', nothing), + ] + + @testset "Test correct markup for $m" for (m, html_node) in simple_markups_nodes + s = "$(m)inner$(m)" + html = parsehtml(string(norg(HTMLTarget(), s))) + b = html.root[2][1][1][1] + @test tag(b) == html_node + end -@testset "Test correct class for $m" for (m, html_class) in simple_markups_class - s = "$(m)inner$(m)" - html = norg(HTMLTarget(), s) |> string |> parsehtml - b = html.root[2][1][1][1] - if isnothing(html_class) - @test !haskey(attrs(b), "class") - else - @test haskey(attrs(b), "class") - @test getattr(b, "class") == html_class + @testset "Test correct class for $m" for (m, html_class) in simple_markups_class + s = "$(m)inner$(m)" + html = parsehtml(string(norg(HTMLTarget(), s))) + b = html.root[2][1][1][1] + if isnothing(html_class) + @test !haskey(attrs(b), "class") + else + @test haskey(attrs(b), "class") + @test getattr(b, "class") == html_class + end end -end -simple_link_tests = [ -(":norg_file:", "norg_file", "norg_file") -("* heading", "#h1-heading", "heading") -("** heading", "#h2-heading", "heading") -("*** heading", "#h3-heading", "heading") -("**** heading", "#h4-heading", "heading") -("***** heading", "#h5-heading", "heading") -("****** heading", "#h6-heading", "heading") -("******* heading", "#h6-heading", "heading") -("# magic", "", "magic") -("42", "#l-42", "#l-42") -("https://example.org", "https://example.org", "https://example.org") -("file://example.txt", "file://example.txt", "file://example.txt") -("/ example.txt", "example.txt", "example.txt") -("? test", "/test", "test") -] - -@testset "Test links: $link" for (link, target, text) in simple_link_tests - s = "{$link}" - html = norg(HTMLTarget(), s) |> string |> parsehtml - link = html.root[2][1][1][1] - @test tag(link) == :a - @test getattr(link, "href") == target - @test string(link[1]) == text -end + simple_link_tests = [ + (":norg_file:", "norg_file", "norg_file") + ("* heading", "#h1-heading", "heading") + ("** heading", "#h2-heading", "heading") + ("*** heading", "#h3-heading", "heading") + ("**** heading", "#h4-heading", "heading") + ("***** heading", "#h5-heading", "heading") + ("****** heading", "#h6-heading", "heading") + ("******* heading", "#h6-heading", "heading") + ("# magic", "", "magic") + ("42", "#l-42", "#l-42") + ("https://example.org", "https://example.org", "https://example.org") + ("file://example.txt", "file://example.txt", "file://example.txt") + ("/ example.txt", "example.txt", "example.txt") + ("? 
test", "/test", "test") + ] + + @testset "Test links: $link" for (link, target, text) in simple_link_tests + s = "{$link}" + html = parsehtml(string(norg(HTMLTarget(), s))) + link = html.root[2][1][1][1] + @test tag(link) == :a + @test getattr(link, "href") == target + @test string(link[1]) == text + end -@testset "Test links with description: $link" for (link, target) in simple_link_tests - s = "{$link}[website]" - html = norg(HTMLTarget(), s) |> string |> parsehtml - link = html.root[2][1][1][1] - @test tag(link) == :a - @test getattr(link, "href") == target - @test string(link[1]) == "website" -end + @testset "Test links with description: $link" for (link, target) in simple_link_tests + s = "{$link}[website]" + html = parsehtml(string(norg(HTMLTarget(), s))) + link = html.root[2][1][1][1] + @test tag(link) == :a + @test getattr(link, "href") == target + @test string(link[1]) == "website" + end -@testset "Anchors with embedded definition: $link" for (link, target) in simple_link_tests - s = "[website]{$link}" - html = norg(HTMLTarget(), s) |> string |> parsehtml - link = html.root[2][1][1][1] - @test tag(link) == :a - @test getattr(link, "href") == target - @test string(link[1]) == "website" -end + @testset "Anchors with embedded definition: $link" for (link, target) in + simple_link_tests + s = "[website]{$link}" + html = parsehtml(string(norg(HTMLTarget(), s))) + link = html.root[2][1][1][1] + @test tag(link) == :a + @test getattr(link, "href") == target + @test string(link[1]) == "website" + end -@testset "Verbatim code" begin - s = """@code julia - using Norg, Hyperscript - s = "*Hi there*" - html = norg(HTMLTarget(), s) |> string |> parsehtml - @end - """ - html = norg(HTMLTarget(), s) |> string |> parsehtml - pre = html.root[2][1][1] - @test tag(pre) == :pre - code = pre[2] - @test tag(code) == :code - @test haskey(attrs(code), "class") - @test getattr(code, "class") == "language-julia" -end + @testset "Verbatim code" begin + s = """@code julia + using Norg, Hyperscript + s = "*Hi there*" + html = norg(HTMLTarget(), s) |> string |> parsehtml + @end + """ + html = parsehtml(string(norg(HTMLTarget(), s))) + pre = html.root[2][1][1] + @test tag(pre) == :pre + code = pre[2] + @test tag(code) == :code + @test haskey(attrs(code), "class") + @test getattr(code, "class") == "language-julia" + end -heading_levels = 1:6 - -@testset "Level $i heading" for i in heading_levels - s = """$(repeat("*", i)) heading - text - """ - html = norg(HTMLTarget(), s) |> string |> parsehtml - section = html.root[2][1][1] - @test tag(section) == :section - @test haskey(attrs(section), "id") - @test getattr(section, "id") == "section-h$(i)-heading" - h,p = children(section) - @test tag(h) == Symbol("h$i") - @test haskey(attrs(h), "id") - @test getattr(h, "id") == "h$(i)-heading" - @test text(first(children(h))) == "heading" - @test tag(p) == :p - @test text(first(children(p))) == "text" -end + heading_levels = 1:6 + + @testset "Level $i heading" for i in heading_levels + s = """$(repeat("*", i)) heading + text + """ + html = parsehtml(string(norg(HTMLTarget(), s))) + section = html.root[2][1][1] + @test tag(section) == :section + @test haskey(attrs(section), "id") + @test getattr(section, "id") == "section-h$(i)-heading" + h, p = children(section) + @test tag(h) == Symbol("h$i") + @test haskey(attrs(h), "id") + @test getattr(h, "id") == "h$(i)-heading" + @test text(first(children(h))) == "heading" + @test tag(p) == :p + @test text(first(children(p))) == "text" + end -nestable_lists = ['~'=>:ol, '-'=>:ul] 
-@testset "$target list" for (m, target) in nestable_lists - s = """$m Hello, salute sinchero oon kydooke - $m Shintero yuo been na - $m Na sinchere fedicheda - """ - html = norg(HTMLTarget(), s) |> string |> parsehtml - list = html.root[2][1][1] - @test tag(list) == target - lis = children(list) - @test all(tag.(lis) .== :li) -end + nestable_lists = ['~' => :ol, '-' => :ul] + @testset "$target list" for (m, target) in nestable_lists + s = """$m Hello, salute sinchero oon kydooke + $m Shintero yuo been na + $m Na sinchere fedicheda + """ + html = parsehtml(string(norg(HTMLTarget(), s))) + list = html.root[2][1][1] + @test tag(list) == target + lis = children(list) + @test all(tag.(lis) .== :li) + end -@testset "quote" begin - s = "> I QUOTE you" - html = norg(HTMLTarget(), s) |> string |> parsehtml - q = html.root[2][1][1] - @test tag(q) == :blockquote -end + @testset "quote" begin + s = "> I QUOTE you" + html = parsehtml(string(norg(HTMLTarget(), s))) + q = html.root[2][1][1] + @test tag(q) == :blockquote + end -@testset "inline link" begin - s = """""" - html = norg(HTMLTarget(), s) |> string |> parsehtml - p = html.root[2][1][1] - @test length(children(p)) == 1 - span = first(children(p)) - @test haskey(attrs(span), "id") - @test getattr(span, "id") == "inline-link-target" -end + @testset "inline link" begin + s = """ """ + html = parsehtml(string(norg(HTMLTarget(), s))) + p = html.root[2][1][1] + @test length(children(p)) == 1 + span = first(children(p)) + @test haskey(attrs(span), "id") + @test getattr(span, "id") == "inline-link-target" + end -@testset "Parse the entier Norg spec without error." begin - s = open(Norg.NORG_SPEC_PATH, "r") do f - read(f, String) + @testset "Parse the entier Norg spec without error." begin + s = open(Norg.NORG_SPEC_PATH, "r") do f + read(f, String) + end + html = parsehtml(string(norg(HTMLTarget(), s))) + @test html isa HTMLDocument end - html = norg(HTMLTarget(), s) |> string |> parsehtml - @test html isa HTMLDocument -end end diff --git a/test/codegen_tests/json.jl b/test/codegen_tests/json.jl index f174c56..ef82ce4 100644 --- a/test/codegen_tests/json.jl +++ b/test/codegen_tests/json.jl @@ -1,192 +1,208 @@ @testset "JSON target" begin -using OrderedCollections -import JSON -# generated JSON correctness is checked directly with pandoc -using pandoc_jll - -function pandoc_approval(json) - io_err = PipeBuffer() - try - pandoc() do pandoc_bin - io = PipeBuffer() - JSON.print(io, json) - run(pipeline(`$(pandoc_bin) -f json -t json`, stdin=io, stdout=devnull, stderr=io_err)) - end - catch e - err = String(take!(io_err)) - @error "Pandoc error" err - return false + using JSON + # generated JSON correctness is checked directly with pandoc + using pandoc_jll + + function pandoc_approval(json) + io_err = PipeBuffer() + try + pandoc() do pandoc_bin + io = PipeBuffer() + write(io, json) + run( + pipeline( + `$(pandoc_bin) -f json -t json`; + stdin=io, + stdout=devnull, + stderr=io_err, + ), + ) + end + catch e + err = String(take!(io_err)) + @error "Pandoc error" err + return false + end + return true end - true -end - -@testset "Test paragraphs" begin - s = "Hi I am first paragraph.\n\nOh, hello there, I am second paragraph !" 
- json = Norg.codegen(Norg.JSONTarget(), Norg.parse_norg(Norg.tokenize(s))) - @test pandoc_approval(json) - pars = json["blocks"] - @test pars[1]["t"] == "Para" - @test pars[2]["t"] == "Para" -end -simple_markups_nodes = [ - ('*', "Strong"), - ('/', "Emph"), - ('_', "Underline"), - ('-', "Strikeout"), - ('!', "Span"), - ('^', "Superscript"), - (',', "Subscript"), - ('`', "Code"), -] - -simple_markups_class = [ - ('*', nothing), - ('/', nothing), - ('_', nothing), - ('-', nothing), - ('!', "spoiler"), - ('^', nothing), - (',', nothing), - ('`', nothing), -] - -@testset "Test correct markup for $m" for (m, node) in simple_markups_nodes - s = "$(m)inner$(m)" - json = Norg.codegen(Norg.JSONTarget(), Norg.parse_norg(Norg.tokenize(s))) - @test pandoc_approval(json) - b = json["blocks"][1]["c"][1] - @test b["t"] == node -end + @testset "Test paragraphs" begin + s = "Hi I am first paragraph.\n\nOh, hello there, I am second paragraph !" + json_str = norg(JSONTarget(), s) + @test pandoc_approval(json_str) + json = JSON.parse(json_str) + pars = json["blocks"] + @test pars[1]["t"] == "Para" + @test pars[2]["t"] == "Para" + end -@testset "Test correct class for $m" for (m, class) in simple_markups_class - s = "$(m)inner$(m)" - json = Norg.codegen(Norg.JSONTarget(), Norg.parse_norg(Norg.tokenize(s))) - @test pandoc_approval(json) - b = json["blocks"][1]["c"][1] - if !isnothing(class) - @test first(b["c"])[2][1] == class + simple_markups_nodes = [ + ('*', "Strong"), + ('/', "Emph"), + ('_', "Underline"), + ('-', "Strikeout"), + ('!', "Span"), + ('^', "Superscript"), + (',', "Subscript"), + ('`', "Code"), + ] + + simple_markups_class = [ + ('*', nothing), + ('/', nothing), + ('_', nothing), + ('-', nothing), + ('!', "spoiler"), + ('^', nothing), + (',', nothing), + ('`', nothing), + ] + + @testset "Test correct markup for $m" for (m, node) in simple_markups_nodes + s = "$(m)inner$(m)" + json_str = norg(JSONTarget(), s) + @test pandoc_approval(json_str) + json = JSON.parse(json_str) + b = json["blocks"][1]["c"][1] + @test b["t"] == node end -end -simple_link_tests = [ -(":norg_file:", "norg_file", "norg_file") -("* heading", "#h1-heading", "heading") -("** heading", "#h2-heading", "heading") -("*** heading", "#h3-heading", "heading") -("**** heading", "#h4-heading", "heading") -("***** heading", "#h5-heading", "heading") -("****** heading", "#h6-heading", "heading") -("******* heading", "#h6-heading", "heading") -("# magic", "", "magic") -("42", "#l-42", "#l-42") -("https://example.org", "https://example.org", "https://example.org") -("file://example.txt", "file://example.txt", "file://example.txt") -("/ example.txt", "example.txt", "example.txt") -("? 
test", "/test", "test") -] - -@testset "Test links: $link" for (link, target, text) in simple_link_tests - s = "{$link}" - json = Norg.codegen(Norg.JSONTarget(), Norg.parse_norg(Norg.tokenize(s))) - @test pandoc_approval(json) - link = json["blocks"][1]["c"][1] - @test link["t"] == "Link" - @test link["c"][2][1]["t"] == "Str" - @test link["c"][2][1]["c"] == text - @test link["c"][3][1] == target -end + @testset "Test correct class for $m" for (m, class) in simple_markups_class + s = "$(m)inner$(m)" + json_str = norg(JSONTarget(), s) + @test pandoc_approval(json_str) + json = JSON.parse(json_str) + b = json["blocks"][1]["c"][1] + if !isnothing(class) + @test first(b["c"])[2][1] == class + end + end -@testset "Test links with description: $link" for (link, target) in simple_link_tests - s = "{$link}[website]" - json = Norg.codegen(Norg.JSONTarget(), Norg.parse_norg(Norg.tokenize(s))) - @test pandoc_approval(json) - link = json["blocks"][1]["c"][1] - @test link["t"] == "Link" - @test link["c"][2][1]["t"] == "Str" - @test link["c"][2][1]["c"] == "website" - @test link["c"][3][1] == target -end + simple_link_tests = [ + (":norg_file:", "norg_file", "norg_file") + ("* heading", "#h1-heading", "heading") + ("** heading", "#h2-heading", "heading") + ("*** heading", "#h3-heading", "heading") + ("**** heading", "#h4-heading", "heading") + ("***** heading", "#h5-heading", "heading") + ("****** heading", "#h6-heading", "heading") + ("******* heading", "#h6-heading", "heading") + ("# magic", "", "magic") + ("42", "#l-42", "#l-42") + ("https://example.org", "https://example.org", "https://example.org") + ("file://example.txt", "file://example.txt", "file://example.txt") + ("/ example.txt", "example.txt", "example.txt") + ("? test", "/test", "test") + ] + + @testset "Test links: $link" for (link, target, text) in simple_link_tests + s = "{$link}" + json_str = norg(JSONTarget(), s) + @test pandoc_approval(json_str) + json = JSON.parse(json_str) + link = json["blocks"][1]["c"][1] + @test link["t"] == "Link" + @test link["c"][2][1]["t"] == "Str" + @test link["c"][2][1]["c"] == text + @test link["c"][3][1] == target + end -@testset "Anchors with embedded definition: $link" for (link, target) in simple_link_tests - s = "[website]{$link}" - json = Norg.codegen(Norg.JSONTarget(), Norg.parse_norg(Norg.tokenize(s))) - @test pandoc_approval(json) - link = json["blocks"][1]["c"][1] - @test link["t"] == "Link" - @test link["c"][2][1]["t"] == "Str" - @test link["c"][2][1]["c"] == "website" - @test link["c"][3][1] == target -end + @testset "Test links with description: $link" for (link, target) in simple_link_tests + s = "{$link}[website]" + json_str = norg(JSONTarget(), s) + @test pandoc_approval(json_str) + json = JSON.parse(json_str) + link = json["blocks"][1]["c"][1] + @test link["t"] == "Link" + @test link["c"][2][1]["t"] == "Str" + @test link["c"][2][1]["c"] == "website" + @test link["c"][3][1] == target + end -@testset "Verbatim code" begin - s = """@code julia - using Norg - s = "*Hi there*" - json = norg(Norg.JSONTarget(), s) - @end - """ - json = norg(JSONTarget(), s) - @test pandoc_approval(json) - cb = json["blocks"][1] - @test cb["t"] == "CodeBlock" - attr, content = cb["c"] - @test attr[2][1] == "julia" - @test content == """using Norg\ns = "*Hi there*"\njson = norg(Norg.JSONTarget(), s)\n""" -end + @testset "Anchors with embedded definition: $link" for (link, target) in + simple_link_tests + s = "[website]{$link}" + json_str = norg(JSONTarget(), s) + @test pandoc_approval(json_str) + json = 
JSON.parse(json_str) + link = json["blocks"][1]["c"][1] + @test link["t"] == "Link" + @test link["c"][2][1]["t"] == "Str" + @test link["c"][2][1]["c"] == "website" + @test link["c"][3][1] == target + end -heading_levels = 1:6 - -@testset "Level $i heading" for i in heading_levels - s = """$(repeat("*", i)) heading - text - """ - json = norg(JSONTarget(), s) - @test pandoc_approval(json) - container = json["blocks"][1] - @test container["t"] == "Div" - attr, content = container["c"] - @test first(attr) == "section-h$i-heading" - heading = first(content) - @test heading["t"] == "Header" - hlevel, attr, title = heading["c"] - @test hlevel == i - @test attr[1] == "h$i-heading" - @test title[1]["c"] == "heading" -end + @testset "Verbatim code" begin + s = """@code julia + using Norg + s = "*Hi there*" + json = norg(Norg.JSONTarget(), s) + @end + """ + json_str = norg(JSONTarget(), s) + @test pandoc_approval(json_str) + json = JSON.parse(json_str) + cb = json["blocks"][1] + @test cb["t"] == "CodeBlock" + attr, content = cb["c"] + @test attr[2][1] == "julia" + @test content == + """using Norg\ns = "*Hi there*"\njson = norg(Norg.JSONTarget(), s)\n""" + end -nestable_lists = ['~'=>"OrderedList", '-'=>"BulletList", ">"=>"BlockQuote"] -@testset "$target nestable" for (m, target) in nestable_lists - s = """$m Hello, salute sinchero oon kydooke - $m Shintero yuo been na - $m Na sinchere fedicheda - """ - json = norg(JSONTarget(), s) - @test pandoc_approval(json) - list = json["blocks"][1] - @test list["t"] == target -end + heading_levels = 1:6 + + @testset "Level $i heading" for i in heading_levels + s = """$(repeat("*", i)) heading + text + """ + json_str = norg(JSONTarget(), s) + @test pandoc_approval(json_str) + json = JSON.parse(json_str) + container = json["blocks"][1] + @test container["t"] == "Div" + attr, content = container["c"] + @test first(attr) == "section-h$i-heading" + heading = first(content) + @test heading["t"] == "Header" + hlevel, attr, title = heading["c"] + @test hlevel == i + @test attr[1] == "h$i-heading" + @test title[1]["c"] == "heading" + end -@testset "inline link" begin - s = """ """ - json = norg(JSONTarget(), s) - @test pandoc_approval(json) - p = json["blocks"][1] - @test length(p["c"]) == 1 - span = first(p["c"]) - @test span["t"] == "Span" - attrs, inlines = span["c"] - id = first(attrs) - @test id == "inline-link-target" -end + nestable_lists = ['~' => "OrderedList", '-' => "BulletList", ">" => "BlockQuote"] + @testset "$target nestable" for (m, target) in nestable_lists + s = """$m Hello, salute sinchero oon kydooke + $m Shintero yuo been na + $m Na sinchere fedicheda + """ + json_str = norg(JSONTarget(), s) + @test pandoc_approval(json_str) + json = JSON.parse(json_str) + list = json["blocks"][1] + @test list["t"] == target + end -@testset "Parse the entire Norg spec without error." begin - s = open(Norg.NORG_SPEC_PATH) do f - read(f, String) + @testset "inline link" begin + s = """ """ + json_str = norg(JSONTarget(), s) + @test pandoc_approval(json_str) + json = JSON.parse(json_str) + p = json["blocks"][1] + @test length(p["c"]) == 1 + span = first(p["c"]) + @test span["t"] == "Span" + attrs, inlines = span["c"] + id = first(attrs) + @test id == "inline-link-target" end - json = norg(JSONTarget(), s) - @test pandoc_approval(json) - @test json isa OrderedDict -end + @testset "Parse the entire Norg spec without error." 
begin
+        s = open(Norg.NORG_SPEC_PATH) do f
+            read(f, String)
+        end
+        json = norg(JSONTarget(), s)
+        @test pandoc_approval(json)
+    end
 end
diff --git a/test/runtests.jl b/test/runtests.jl
index 186ca7f..e59af3f 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -1,12 +1,17 @@
 using Norg
 using Test
 using AbstractTrees
+using Aqua
 import Norg: @K_str, kind, value
 
 @testset "Norg.jl" begin
-    @testset "scanners.jl" begin include("test_scanners.jl") end
-    @testset "Tokenize.jl" begin include("test_tokenize.jl") end
+    @testset "scanners.jl" begin
+        include("test_scanners.jl")
+    end
+    @testset "Tokenize.jl" begin
+        include("test_tokenize.jl")
+    end
     @testset "parser.jl" begin
         include("ast_tests/test_markup.jl")
         include("ast_tests/test_paragraphs.jl")
@@ -19,8 +24,14 @@ import Norg: @K_str, kind, value
         include("ast_tests/test_detached_modifier_suffix.jl")
         # include("ast_tests/misc_bugs.jl")
     end
-    @testset "codegen.jl" begin
-        include("codegen_tests/html.jl")
+    @testset "codegen.jl" begin
+        include("codegen_tests/html.jl")
         include("codegen_tests/json.jl")
     end
+    @testset "code analysis" begin
+        if VERSION ≥ v"1.9"
+            include("code_analysis_tests/test_jet.jl")
+        end
+        include("code_analysis_tests/test_aqua.jl")
+    end
 end
diff --git a/test/test_scanners.jl b/test/test_scanners.jl
index cc10e01..73e2cbb 100644
--- a/test/test_scanners.jl
+++ b/test/test_scanners.jl
@@ -1,36 +1,34 @@
 @testset "Line ending tokens" begin
-    @test Norg.Scanners.scan(Norg.Scanners.LineEnding(), "\r\nfoo") |>
-          Norg.Scanners.success
-    @test Norg.Scanners.scan(Norg.Scanners.LineEnding(), "foo") |>
-          !(Norg.Scanners.success)
+    @test Norg.Scanners.success(Norg.Scanners.scan(Norg.Scanners.LineEnding(), "\r\nfoo"))
+    @test !(Norg.Scanners.success)(Norg.Scanners.scan(Norg.Scanners.LineEnding(), "foo"))
     @test Norg.is_line_ending(Norg.Scanners.scan("\r\nfoo"))
 end
 
 @testset "Whitespace tokens" begin
-    @test Norg.Scanners.scan(Norg.Scanners.Whitespace(), " foo") |>
-          Norg.Scanners.success
-    @test Norg.Scanners.scan(Norg.Scanners.Whitespace(), "foo") |>
-          !Norg.Scanners.success
+    @test Norg.Scanners.success(Norg.Scanners.scan(Norg.Scanners.Whitespace(), " foo"))
+    @test !Norg.Scanners.success(Norg.Scanners.scan(Norg.Scanners.Whitespace(), "foo"))
     @test Norg.is_whitespace(Norg.Scanners.scan(" foo"))
 end
 
 @testset "Generic punctuation token" begin
-    @test Norg.Scanners.scan(Norg.Scanners.Punctuation(),
-                             string(rand(Norg.Scanners.NORG_PUNCTUATION)) *
-                             "foo") |>
-          Norg.Scanners.success
-    @test Norg.Scanners.scan(Norg.Scanners.Punctuation(), "foo") |> !Norg.Scanners.success
-    @test Norg.is_punctuation(Norg.Scanners.scan(string(rand(Norg.Scanners.NORG_PUNCTUATION)) * "foo"))
+    @test Norg.Scanners.success(Norg.Scanners.scan(
+        Norg.Scanners.Punctuation(), string(rand(Norg.Scanners.NORG_PUNCTUATION)) * "foo"
+    ))
+    @test !Norg.Scanners.success(Norg.Scanners.scan(Norg.Scanners.Punctuation(), "foo"))
+    @test Norg.is_punctuation(
+        Norg.Scanners.scan(string(rand(Norg.Scanners.NORG_PUNCTUATION)) * "foo")
+    )
 end
 
-@testset "Single punctuation kind $kind" for kind in Norg.Kinds.all_single_punctuation_tokens()
-    @test Norg.Scanners.scan(kind, "$(kind)foo") |> Norg.Scanners.success
+@testset "Single punctuation kind $kind" for kind in
+                                             Norg.Kinds.all_single_punctuation_tokens()
+    @test Norg.Scanners.success(Norg.Scanners.scan(kind, "$(kind)foo"))
     @test Norg.kind(Norg.Scanners.scan("$(kind)foo")) == kind
-    @test Norg.Scanners.scan(kind, "foo") |> !Norg.Scanners.success
+    @test !Norg.Scanners.success(Norg.Scanners.scan(kind, "foo"))
 end
 
 @testset "Word token" begin
-    @test Norg.Scanners.scan(Norg.Scanners.Word(), "foo") |> Norg.Scanners.success
-    @test Norg.Scanners.scan("foo") |> Norg.is_word
-    @test Norg.Scanners.scan(Norg.Scanners.Word(), "}foo") |> !Norg.Scanners.success
+    @test Norg.Scanners.success(Norg.Scanners.scan(Norg.Scanners.Word(), "foo"))
+    @test Norg.is_word(Norg.Scanners.scan("foo"))
+    @test !Norg.Scanners.success(Norg.Scanners.scan(Norg.Scanners.Word(), "}foo"))
 end