Просмотр исходного кода

fix #153: parse (and unparse) comment tags {# .. #}

the {# .. #} construct is now called a comment tag (previously comment_inline).
n.b. {% comment %} ... {% endcomment %} is a comment block.

some cleanups in sources_parser + handle the new comment_tag token.

beginnings of tests for the unparser (currently lives in the sources parser test suite).
Andreas Stenius 11 лет назад
Родитель
Commit
1ece0a6b35

+ 2 - 0
src/erlydtl_beam_compiler.erl

@@ -573,6 +573,8 @@ body_ast(DjangoParseTree, BodyScope, TreeWalker) ->
                 call_with_ast(Name, With, TW);
             ({'comment', _Contents}, TW) ->
                 empty_ast(TW);
+            ({'comment_tag', _, _}, TW) ->
+                empty_ast(TW);
             ({'cycle', Names}, TW) ->
                 cycle_ast(Names, TW);
             ({'cycle_compat', Names}, TW) ->

+ 4 - 8
src/erlydtl_parser.yrl

@@ -52,7 +52,7 @@ Nonterminals
     BlockBraced
     EndBlockBraced
 
-    CommentInline
+    CommentTag
     CommentBlock
     CommentBraced
     EndCommentBraced
@@ -143,7 +143,7 @@ Terminals
     call_keyword
     close_tag
     close_var
-    comment_inline
+    comment_tag
     comment_keyword
     cycle_keyword
     elif_keyword
@@ -226,7 +226,7 @@ Elements -> Elements BlockTransBlock : '$1' ++ ['$2'].
 Elements -> Elements CallTag : '$1' ++ ['$2'].
 Elements -> Elements CallWithTag : '$1' ++ ['$2'].
 Elements -> Elements CommentBlock : '$1' ++ ['$2'].
-Elements -> Elements CommentInline : '$1' ++ ['$2'].
+Elements -> Elements CommentTag : '$1' ++ ['$2'].
 Elements -> Elements CustomTag : '$1' ++ ['$2'].
 Elements -> Elements CycleTag : '$1' ++ ['$2'].
 Elements -> Elements ExtendsTag : '$1' ++ ['$2'].
@@ -299,7 +299,7 @@ CommentBlock -> CommentBraced Elements EndCommentBraced : {comment, '$2'}.
 CommentBraced -> open_tag comment_keyword close_tag.
 EndCommentBraced -> open_tag endcomment_keyword close_tag.
 
-CommentInline -> comment_inline : {comment, inline_comment_to_string('$1')}.
+CommentTag -> comment_tag : '$1'.
 
 CycleTag -> open_tag cycle_keyword CycleNamesCompat close_tag : {cycle_compat, '$3'}.
 CycleTag -> open_tag cycle_keyword CycleNames close_tag : {cycle, '$3'}.
@@ -428,8 +428,4 @@ CallWithTag -> open_tag call_keyword identifier with_keyword Value close_tag : {
 
 Erlang code.
 
-inline_comment_to_string({comment_inline, Pos, S}) ->
-    %% inline comment converted to block comment for simplicity
-    [{string, Pos, S}].
-
 %% vim: syntax=erlang

+ 4 - 5
src/erlydtl_scanner.erl

@@ -36,7 +36,7 @@
 %%%-------------------------------------------------------------------
 -module(erlydtl_scanner).
 
-%% This file was generated 2014-03-21 23:07:32 UTC by slex 0.2.1.
+%% This file was generated 2014-04-09 13:00:24 UTC by slex 0.2.1.
 %% http://github.com/erlydtl/slex
 -slex_source(["src/erlydtl_scanner.slex"]).
 
@@ -181,11 +181,10 @@ scan("#}" ++ T, S, {R, C}, {_, "#}"}) ->
 scan([H | T], S, {R, C} = P, {in_comment, E} = St) ->
     scan(T,
 	 case S of
-	   [{comment_inline, _, L} = M | Ss] ->
+	   [{comment_tag, _, L} = M | Ss] ->
 	       [setelement(3, M, [H | L]) | Ss];
 	   _ ->
-	       [{comment_inline, P, [H]} | post_process(S,
-							comment_inline)]
+	       [{comment_tag, P, [H]} | post_process(S, comment_tag)]
 	 end,
 	 case H of
 	   $\n -> {R + 1, 1};
@@ -540,7 +539,7 @@ post_process(_, {string, _, L} = T, _) ->
     setelement(3, T, begin L1 = lists:reverse(L), L1 end);
 post_process(_, {string_literal, _, L} = T, _) ->
     setelement(3, T, begin L1 = lists:reverse(L), L1 end);
-post_process(_, {comment_inline, _, L} = T, _) ->
+post_process(_, {comment_tag, _, L} = T, _) ->
     setelement(3, T, begin L1 = lists:reverse(L), L1 end);
 post_process(_, {number_literal, _, L} = T, _) ->
     setelement(3, T,

+ 3 - 3
src/erlydtl_scanner.slex

@@ -72,7 +72,7 @@ end.
 30 #} any+: skip, in_text-.
 
 %% must come before the `space any' rule
-40 any in_comment: +comment_inline.
+40 any in_comment: +comment_tag.
 %% end comment rules
 
 %% The rest is "just" text..
@@ -257,7 +257,7 @@ end.
 
 string: lists reverse.
 string_literal: lists reverse.
-comment_inline: lists reverse.
+comment_tag: lists reverse.
 number_literal: lists reverse, list_to_integer.
 open_var: to_atom.
 close_var: to_atom.
@@ -373,4 +373,4 @@ end.
 
 form format_where(in_comment) -> "in comment"; \
      format_where(in_code) -> "in code block" \
-end.
+end.

+ 3 - 1
src/erlydtl_unparser.erl

@@ -5,7 +5,7 @@ unparse(DjangoParseTree) ->
     unparse(DjangoParseTree, []).
 
 unparse([], Acc) ->
-    lists:reverse(Acc);
+    lists:flatten(lists:reverse(Acc));
 unparse([{'extends', Value}|Rest], Acc) ->
     unparse(Rest, [["{% extends ", unparse_value(Value), " %}"]|Acc]);
 unparse([{'autoescape', OnOrOff, Contents}|Rest], Acc) ->
@@ -22,6 +22,8 @@ unparse([{'call', Identifier, With}|Rest], Acc) ->
     unparse(Rest, [["{% call ", unparse_identifier(Identifier), " with ", unparse_args(With), " %}"]|Acc]);
 unparse([{'comment', Contents}|Rest], Acc) ->
     unparse(Rest, [["{% comment %}", unparse(Contents), "{% endcomment %}"]|Acc]);
+unparse([{'comment_tag', _Pos, Text}|Rest], Acc) ->
+    unparse(Rest, [["{#", Text, "#}"]|Acc]);
 unparse([{'cycle', Names}|Rest], Acc) ->
     unparse(Rest, [["{% cycle ", unparse(Names), " %}"]|Acc]);
 unparse([{'cycle_compat', Names}|Rest], Acc) ->

+ 27 - 28
src/i18n/sources_parser.erl

@@ -53,7 +53,7 @@
 -define(bail(Fmt, Args),
         throw(lists:flatten(io_lib:format(Fmt, Args)))).
 
--define(GET_FIELD(Name), phrase_info(Name, P) -> P#phrase.Name).
+-define(GET_FIELD(Key), phrase_info(Key, #phrase{ Key = Value }) -> Value).
 
 %%
 %% API Functions
@@ -73,19 +73,17 @@ process_content(Path, Content) ->
 %% @doc convert new API output to old one.
 -spec to_compat([phrase()]) -> [compat_phrase()].
 to_compat(Phrases) ->
-    Convert = fun(#phrase{msgid=Str, file=File, line=Line, col=Col}) ->
-                      {Str, {File, Line, Col}}
-              end,
-    lists:map(Convert, Phrases).
+    [{Str, {File, Line, Col}}
+     || #phrase{msgid=Str, file=File, line=Line, col=Col}
+            <- Phrases].
 
 %% New API
 
 %% @doc extract info about phrase.
 %% See `field()' type for list of available info field names.
--spec phrase_info([field()] | field(), phrase()) -> [Info] | Info
-                                                        when
+-spec phrase_info([field()] | field(), phrase()) -> [Info] | Info when
       Info :: non_neg_integer() | string() | undefined.
-?GET_FIELD(msgid);                                  %little magick
+?GET_FIELD(msgid);
 ?GET_FIELD(msgid_plural);
 ?GET_FIELD(context);
 ?GET_FIELD(comment);
@@ -94,7 +92,7 @@ to_compat(Phrases) ->
 ?GET_FIELD(col);
 phrase_info(Fields, Phrase) when is_list(Fields) ->
     %% you may pass list of fields
-    lists:map(fun(Field) -> phrase_info(Field, Phrase) end, Fields).
+    [phrase_info(Field, Phrase) || Field <- Fields].
 
 %% @doc list files, using wildcard and extract phrases from them
 -spec parse_pattern([string()]) -> [phrase()].
@@ -103,7 +101,7 @@ parse_pattern(Pattern) ->
     GetFiles = fun(Path,Acc) -> Acc ++ filelib:wildcard(Path) end,
     Files = lists:foldl(GetFiles,[],Pattern),
     io:format("Parsing files ~p~n",[Files]),
-    ParsedFiles = lists:map(fun(File)-> parse_file(File) end, Files),
+    ParsedFiles = [parse_file(File) || File <- Files],
     lists:flatten(ParsedFiles).
 
 %% @doc extract phrases from single file
@@ -132,11 +130,14 @@ parse_content(Path,Content)->
 %%
 
 process_ast(Fname, Tokens) ->
-    {ok, (process_ast(Fname, Tokens, #state{}))#state.acc }.
-process_ast(_Fname, [], St) -> St;
-process_ast(Fname,[Head|Tail], St) ->
-    NewSt = process_token(Fname,Head,St),
-    process_ast(Fname, Tail, NewSt).
+    State = process_ast(Fname, Tokens, #state{}),
+    {ok, State#state.acc}.
+
+process_ast(Fname, Tokens, State) ->
+    lists:foldl(
+      fun (Token, St) ->
+              process_token(Fname, Token, St)
+      end, State, Tokens).
 
 %%Block are recursivelly processed, trans are accumulated and other tags are ignored
 process_token(Fname, {block,{identifier,{_Line,_Col},_Identifier},Children}, St) -> process_ast(Fname, Children, St);
@@ -151,7 +152,7 @@ process_token(_Fname, {apply_filter, _Value, _Filter}, St) -> St;
 process_token(_Fname, {date, now, _Filter}, St) -> St;
 process_token(Fname, {blocktrans, Args, Contents}, #state{acc=Acc, translators_comment=Comment}=St) ->
     {Fname, Line, Col} = guess_blocktrans_lc(Fname, Args, Contents),
-    Phrase = #phrase{msgid=lists:flatten(erlydtl_unparser:unparse(Contents)),
+    Phrase = #phrase{msgid=erlydtl_unparser:unparse(Contents),
                      comment=Comment,
                      file=Fname,
                      line=Line,
@@ -159,6 +160,8 @@ process_token(Fname, {blocktrans, Args, Contents}, #state{acc=Acc, translators_c
     St#state{acc=[Phrase | Acc], translators_comment=undefined};
 process_token(_, {comment, Comment}, St) ->
     St#state{translators_comment=maybe_translators_comment(Comment)};
+process_token(_Fname, {comment_tag, _Pos, Comment}, St) ->
+    St#state{translators_comment=translators_comment_text(Comment)};
 process_token(Fname, {_Instr, _Cond, Children}, St) -> process_ast(Fname, Children, St);
 process_token(Fname, {_Instr, _Cond, Children, Children2}, St) ->
     StModified = process_ast(Fname, Children, St),
@@ -182,19 +185,15 @@ guess_blocktrans_lc(Fname, _, _) ->
 
 
 maybe_translators_comment([{string, _Pos, S}]) ->
-    %% fast path
-    case is_translators(S) of
-        true -> S;
-        false -> undefined
-    end;
+    translators_comment_text(S);
 maybe_translators_comment(Other) ->
     %% smth like "{%comment%}Translators: Hey, {{var}} is variable substitution{%endcomment%}"
-    Unparsed = lists:flatten(erlydtl_unparser:unparse(Other)),
-    case is_translators(Unparsed) of
-        true -> Unparsed;
-        false -> undefined
-    end.
+    Unparsed = erlydtl_unparser:unparse(Other),
+    translators_comment_text(Unparsed).
 
-is_translators(S) ->
+translators_comment_text(S) ->
     Stripped = string:strip(S, left),
-    "translators:" == string:to_lower(string:substr(Stripped, 1, 12)).
+    case "translators:" == string:to_lower(string:substr(Stripped, 1, 12)) of
+        true -> S;
+        false -> undefined
+    end.

+ 138 - 5
test/sources_parser_tests.erl

@@ -1,15 +1,12 @@
 -module(sources_parser_tests).
 
 -include_lib("eunit/include/eunit.hrl").
+-include("include/erlydtl_ext.hrl").
 
 all_sources_parser_test_() ->
     [{Title, [test_fun(Test) || Test <- Tests]}
      || {Title, Tests} <- test_defs()].
 
-all_sources_parser_ext_test_() ->
-    [test_ext_fun(Test) || Test <- ext_test_defs()].
-
-
 test_fun({Name, Content, Output}) ->
     {Name, fun () ->
                    Tokens = (catch sources_parser:process_content("dummy_path", Content)),
@@ -35,11 +32,14 @@ test_defs() ->
         [{"Hello inside an if inside a for",{"dummy_path",1,73}}]},
        {"if and else both with trans",
         <<"<html>{% block content %}{% if thing %} {% trans \"Hello inside an if\" %} {% else %} {% trans \"Hello inside an else\" %} {% endif %} {% endblock %}</html>">>,
-        [ {"Hello inside an else",{"dummy_path",1,94}}, {"Hello inside an if",{"dummy_path",1,50}}]}
+        [{"Hello inside an else",{"dummy_path",1,94}}, {"Hello inside an if",{"dummy_path",1,50}}]}
       ]}
     ].
 
 
+all_sources_parser_ext_test_() ->
+    [test_ext_fun(Test) || Test <- ext_test_defs()].
+
 test_ext_fun({Name, Tpl, {Fields, Output}}) ->
     {Name, fun() ->
                    Tokens = [sources_parser:phrase_info(Fields, P)
@@ -58,3 +58,136 @@ ext_test_defs() ->
       <<"{%comment%}translators: comment{%endcomment%}{%blocktrans with a=b%}B={{b}}{%endblocktrans%}">>,
       {[msgid, comment], [["B={{ b }}", "translators: comment"]]}}
     ].
+
+unparser_test_() ->
+    [test_unparser_fun(Test) || Test <- unparser_test_defs()].
+
+test_unparser_fun({Name, Tpl}) ->
+    {Name, fun() ->
+                   %% take input Tpl value, parse it, "unparse" it, then parse it again.
+                   %% both parsed values should be equivalent, even if the source versions
+                   %% are not an exact match (there can be whitespace differences)
+                   {ok, Dpt} = erlydtl_compiler:do_parse_template(
+                                 Tpl, #dtl_context{}),
+                   Unparsed = erlydtl_unparser:unparse(Dpt),
+                   {ok, DptU} = erlydtl_compiler:do_parse_template(
+                                 Unparsed, #dtl_context{}),
+                   compare_tree(Dpt, DptU)
+           end}.
+
+unparser_test_defs() ->
+    [{"comment tag", <<"here it is: {# this is my comment #} <-- it was right there.">>}
+    ].
+
+
+compare_tree([], []) -> ok;
+compare_tree([H1|T1], [H2|T2]) ->
+    compare_token(H1, H2),
+    compare_tree(T1, T2).
+
+compare_token({'extends', Value1}, {'extends', Value2}) ->
+    ?assertEqual(Value1, Value2);
+compare_token({'autoescape', OnOrOff1, Contents1}, {'autoescape', OnOrOff2, Contents2}) ->
+    ?assertEqual(OnOrOff1, OnOrOff2),
+    compare_tree(Contents1, Contents2);
+compare_token({'block', Identifier1, Contents1}, {'block', Identifier2, Contents2}) ->
+    compare_identifier(Identifier1, Identifier2),
+    compare_tree(Contents1, Contents2);
+compare_token({'blocktrans', Args1, Contents1}, {'blocktrans', Args2, Contents2}) ->
+    compare_args(Args1, Args2),
+    compare_tree(Contents1, Contents2);
+compare_token({'call', Identifier1}, {'call', Identifier2}) ->
+    compare_identifier(Identifier1, Identifier2);
+compare_token({'call', Identifier1, With1}, {'call', Identifier2, With2}) ->
+    ?assertEqual(With1, With2),
+    compare_identifier(Identifier1, Identifier2);
+compare_token({'comment', Contents1}, {'comment', Contents2}) ->
+    compare_tree(Contents1, Contents2);
+compare_token({'comment_tag', _Pos, Text1}, {'comment_tag', _Pos, Text2}) ->
+    ?assertEqual(Text1, Text2);
+compare_token({'cycle', Names1}, {'cycle', Names2}) ->
+    compare_tree(Names1, Names2);
+compare_token({'cycle_compat', Names1}, {'cycle_compat', Names2}) ->
+    compare_cycle_compat_names(Names1, Names2);
+compare_token({'date', 'now', Value1}, {'date', 'now', Value2}) ->
+    compare_value(Value1, Value2);
+compare_token({'filter', FilterList1, Contents1}, {'filter', FilterList2, Contents2}) ->
+    compare_filters(FilterList1, FilterList2),
+    compare_tree(Contents1, Contents2);
+compare_token({'firstof', Vars1}, {'firstof', Vars2}) ->
+    compare_tree(Vars1, Vars2);
+%% TODO...
+%% compare_token({'for', {'in', IteratorList, Identifier}, Contents}, {'for', {'in', IteratorList, Identifier}, Contents}) -> ok;
+%% compare_token({'for', {'in', IteratorList, Identifier}, Contents, EmptyPartsContents}, {'for', {'in', IteratorList, Identifier}, Contents, EmptyPartsContents}) -> ok;
+%% compare_token({'if', Expression, Contents}, {'if', Expression, Contents}) -> ok;
+%% compare_token({'ifchanged', Expression, IfContents}, {'ifchanged', Expression, IfContents}) -> ok;
+%% compare_token({'ifchangedelse', Expression, IfContents, ElseContents}, {'ifchangedelse', Expression, IfContents, ElseContents}) -> ok;
+%% compare_token({'ifelse', Expression, IfContents, ElseContents}, {'ifelse', Expression, IfContents, ElseContents}) -> ok;
+%% compare_token({'ifequal', [Arg1, Arg2], Contents}, {'ifequal', [Arg1, Arg2], Contents}) -> ok;
+%% compare_token({'ifequalelse', [Arg1, Arg2], IfContents, ElseContents}, {'ifequalelse', [Arg1, Arg2], IfContents, ElseContents}) -> ok;
+%% compare_token({'ifnotequal', [Arg1, Arg2], Contents}, {'ifnotequal', [Arg1, Arg2], Contents}) -> ok;
+%% compare_token({'ifnotequalelse', [Arg1, Arg2], IfContents, ElseContents}, {'ifnotequalelse', [Arg1, Arg2], IfContents, ElseContents}) -> ok;
+%% compare_token({'include', Value, []}, {'include', Value, []}) -> ok;
+%% compare_token({'include', Value, Args}, {'include', Value, Args}) -> ok;
+%% compare_token({'include_only', Value, []}, {'include_only', Value, []}) -> ok;
+%% compare_token({'include_only', Value, Args}, {'include_only', Value, Args}) -> ok;
+%% compare_token({'regroup', {Variable, Identifier1, Identifier2}, Contents}, {'regroup', {Variable, Identifier1, Identifier2}, Contents}) -> ok;
+%% compare_token({'spaceless', Contents}, {'spaceless', Contents}) -> ok;
+%% compare_token({'ssi', Arg}, {'ssi', Arg}) -> ok;
+%% compare_token({'ssi_parsed', Arg}, {'ssi_parsed', Arg}) -> ok;
+compare_token({'string', _, String1}, {'string', _, String2}) ->
+    ?assertEqual(String1, String2);
+%% compare_token({'tag', Identifier, []}, {'tag', Identifier, []}) -> ok;
+%% compare_token({'tag', Identifier, Args}, {'tag', Identifier, Args}) -> ok;
+%% compare_token({'templatetag', Identifier}, {'templatetag', Identifier}) -> ok;
+%% compare_token({'trans', Value}, {'trans', Value}) -> ok;
+%% compare_token({'widthratio', Numerator, Denominator, Scale}, {'widthratio', Numerator, Denominator, Scale}) -> ok;
+%% compare_token({'with', Args, Contents}, {'with', Args, Contents}) -> ok;
+compare_token(ValueToken1, ValueToken2) ->
+    compare_value(ValueToken1, ValueToken2).
+
+compare_identifier({identifier, _, Name1}, {identifier, _, Name2}) ->
+    ?assertEqual(Name1, Name2).
+
+compare_filters(FilterList1, FilterList2) ->
+    [compare_filter(F1, F2)
+     || {F1, F2} <- lists:zip(FilterList1, FilterList2)].
+
+compare_filter([Identifier1], [Identifier2]) ->
+    compare_identifier(Identifier1, Identifier2);
+compare_filter([Identifier1, Arg1], [Identifier2, Arg2]) ->
+    compare_identifier(Identifier1, Identifier2),
+    compare_value(Arg1, Arg2).
+
+compare_expression({'expr', _, Arg11, Arg12}, {'expr', _, Arg21, Arg22}) ->
+    compare_value(Arg11, Arg21),
+    compare_value(Arg12, Arg22);
+compare_expression({'expr', "not", Expr1}, {'expr', "not", Expr2}) ->
+    compare_expression(Expr1, Expr2);
+compare_expression(Other1, Other2) ->
+    compare_value(Other1, Other2).
+
+compare_value({'string_literal', _, Value1}, {'string_literal', _, Value2}) ->
+    ?assertEqual(Value1, Value2);
+compare_value({'number_literal', _, Value1}, {'number_literal', _, Value2}) ->
+    ?assertEqual(Value1, Value2);
+compare_value({'apply_filter', Variable1, Filter1}, {'apply_filter', Variable2, Filter2}) ->
+    compare_value(Variable1, Variable2),
+    compare_filter(Filter1, Filter2);
+compare_value({'attribute', {Variable1, Identifier1}}, {'attribute', {Variable2, Identifier2}}) ->
+    compare_value(Variable1, Variable2),
+    compare_identifier(Identifier1, Identifier2);
+compare_value({'variable', Identifier1}, {'variable', Identifier2}) ->
+    compare_identifier(Identifier1, Identifier2).
+
+compare_args(Args1, Args2) ->
+    [compare_arg(A1, A2)
+     || {A1, A2} <- lists:zip(Args1, Args2)].
+
+compare_arg({{identifier, _, Name1}, Value1}, {{identifier, _, Name2}, Value2}) ->
+    ?assertEqual(Name1, Name2),
+    compare_value(Value1, Value2).
+
+compare_cycle_compat_names(Names1, Names2) ->
+    [compare_identifier(N1, N2)
+     || {N1, N2} <- lists:zip(Names1, Names2)].