NewLang Project
Yet another programming language
lexer_test.cpp
#ifdef BUILD_UNITTEST

#include "warning_push.h"
#include <gtest/gtest.h>
#include "warning_pop.h"

#include "term.h"
#include "lexer.h"
#include "parser.h"
#include "macro.h"


using namespace newlang;

class Lexer : public ::testing::Test {
protected:

    std::vector<TermPtr> tokens;

    void SetUp() {
    }

    void TearDown() {
    }

    // Tokenize the string and fill the `tokens` vector.
    // The ignore_* flags control which auxiliary tokens (spaces, indents,
    // comments and line breaks) the scanner drops. Returns the token count.
    int64_t TokenParse(const char *str, bool ignore_space = true, bool ignore_indent = true, bool ignore_comment = true, bool ignore_crlf = true) {
        std::istringstream strstr(str);

        Scanner lexer(&strstr);

        lexer.m_ignore_indent = ignore_indent;
        lexer.m_ignore_space = ignore_space;
        lexer.m_ignore_comment = ignore_comment;
        lexer.m_ignore_crlf = ignore_crlf;

        tokens.clear();
        TermPtr tok;
        parser::location_type loc;
        while (lexer.lex(&tok, &loc) != parser::token::END) {
            tokens.push_back(tok);
        }
        return tokens.size();
    }

    // Count how many of the parsed tokens carry the given term identifier.
    int Count(TermID token_id) {
        int result = 0;
        for (size_t i = 0; i < tokens.size(); i++) {
            if (tokens[i]->getTermID() == token_id) {
                result++;
            }
        }
        return result;
    }

    // Render the parsed tokens as "text:ID " pairs for diagnostic output.
    std::string Dump() {
        std::string result;
        for (size_t i = 0; i < tokens.size(); i++) {
            result += tokens[i]->m_text;
            result += ":";
            result += toString(tokens[i]->m_id);
            result += " ";
        }
        return result;
    }

};

TEST_F(Lexer, Word) {
    ASSERT_EQ(1, TokenParse("alpha "));
    EXPECT_EQ(1, Count(TermID::NAME));
    EXPECT_STREQ("alpha", tokens[0]->getText().c_str());


    ASSERT_EQ(2, TokenParse("буквы ещёЁ_99"));
    EXPECT_EQ(2, Count(TermID::NAME));
    EXPECT_STREQ("буквы", tokens[0]->getText().c_str());
    EXPECT_STREQ("ещёЁ_99", tokens[1]->getText().c_str());

    ASSERT_EQ(3, TokenParse("one two \t three"));
    EXPECT_EQ(3, Count(TermID::NAME));

    EXPECT_STREQ("one", tokens[0]->getText().c_str()) << tokens[0]->getText().c_str();
    EXPECT_STREQ("two", tokens[1]->getText().c_str()) << tokens[1]->getText().c_str();
    EXPECT_STREQ("three", tokens[2]->getText().c_str()) << tokens[2]->getText().c_str();
}

TEST_F(Lexer, Template) {
    ASSERT_EQ(1, TokenParse("\"\"\"\"\"\""));
    EXPECT_EQ(1, Count(TermID::TEMPLATE));
    EXPECT_STREQ("", tokens[0]->getText().c_str()) << tokens[0]->getText();
}

TEST_F(Lexer, Template2) {
    ASSERT_EQ(1, TokenParse("\"\"\" ${123} \n \"\"\""));
    EXPECT_EQ(1, Count(TermID::TEMPLATE));
    EXPECT_STREQ(" ${123} \n ", tokens[0]->getText().c_str()) << tokens[0]->getText();
}

TEST_F(Lexer, Template3) {
    ASSERT_EQ(1, TokenParse("''' ${123} \n\t '''"));
    EXPECT_EQ(1, Count(TermID::TEMPLATE));
    EXPECT_STREQ(" ${123} \n\t ", tokens[0]->getText().c_str()) << tokens[0]->getText();
}

TEST_F(Lexer, StringEmpty) {
    ASSERT_EQ(0, TokenParse(""));
    ASSERT_EQ(1, TokenParse("''"));
    EXPECT_EQ(1, Count(TermID::STRCHAR));
    EXPECT_STREQ("", tokens[0]->getText().c_str()) << tokens[0]->getText();
}

TEST_F(Lexer, StringEmpty2) {
    ASSERT_EQ(0, TokenParse(""));
    ASSERT_EQ(1, TokenParse("\"\""));
    EXPECT_EQ(1, Count(TermID::STRWIDE));
    EXPECT_STREQ("", tokens[0]->getText().c_str()) << tokens[0]->getText();
}

TEST_F(Lexer, StringSimple) {
    ASSERT_EQ(1, TokenParse("' '"));
    EXPECT_EQ(1, Count(TermID::STRCHAR));
    EXPECT_STREQ(" ", tokens[0]->getText().c_str()) << tokens[0]->getText();
}

TEST_F(Lexer, StringSimple2) {
    ASSERT_EQ(1, TokenParse("\" \""));
    EXPECT_EQ(1, Count(TermID::STRWIDE));
    EXPECT_STREQ(" ", tokens[0]->getText().c_str()) << tokens[0]->getText();
}

TEST_F(Lexer, FullString) {
    ASSERT_EQ(1, TokenParse("' \t \xFF\r\\''"));
    EXPECT_EQ(1, Count(TermID::STRCHAR));
    EXPECT_STREQ(" \t \xFF\r'", tokens[0]->getText().c_str()) << tokens[0]->getText();
}

TEST_F(Lexer, FullString2) {
    ASSERT_EQ(1, TokenParse("\" \t \xFF\r\\\"\""));
    EXPECT_EQ(1, Count(TermID::STRWIDE));
    EXPECT_STREQ(" \t \xFF\r\"", tokens[0]->getText().c_str()) << tokens[0]->getText();
}

TEST_F(Lexer, Integer) {
    ASSERT_EQ(1, TokenParse("123456"));
    EXPECT_EQ(1, Count(TermID::INTEGER)) << newlang::toString(tokens[0]->getTermID());

    EXPECT_STREQ("123456", tokens[0]->getText().c_str());

    ASSERT_EQ(3, TokenParse("123456 * 123"));
    EXPECT_EQ(1, Count(TermID::SYMBOL)) << Dump();
    EXPECT_EQ(2, Count(TermID::INTEGER)) << Dump();

    EXPECT_STREQ("123456", tokens[0]->getText().c_str()) << tokens[0]->getText();
    EXPECT_STREQ("*", tokens[1]->getText().c_str()) << tokens[1]->getText();
    EXPECT_STREQ("123", tokens[2]->getText().c_str()) << tokens[2]->getText();
}

TEST_F(Lexer, Float) {
    ASSERT_EQ(1, TokenParse("1.e10"));
    EXPECT_EQ(1, Count(TermID::NUMBER));
    EXPECT_STREQ("1.e10", tokens[0]->getText().c_str());
}

//TEST_F(Lexer, Complex0) {
//    if(1 != Parse("-1j-0.2")) {
//        for (auto elem : tokens) {
//            std::cout << newlang::toString(elem->m_id) << " " << elem->m_text << "\n";
//        }
//    }
//    ASSERT_EQ(1, tokens.size());
//    EXPECT_EQ(1, Count(TermID::COMPLEX)) << newlang::toString(tokens[0]->m_id);
//    EXPECT_STREQ("-1j-0.2", tokens[0]->getText().c_str());
//}
//
//TEST_F(Lexer, Complex1) {
//    if(1 != Parse("1.333+0.e10j")) {
//        for (auto elem : tokens) {
//            std::cout << newlang::toString(elem->m_id) << " " << elem->m_text << "\n";
//        }
//    }
//    ASSERT_EQ(1, tokens.size());
//    EXPECT_EQ(1, Count(TermID::COMPLEX));
//    EXPECT_STREQ("1.333+0.e10j", tokens[0]->getText().c_str());
//}

TEST_F(Lexer, Term) {

    if (1 != TokenParse("$alpha ")) {
        for (auto elem : tokens) {
            std::cout << newlang::toString(elem->m_id) << " " << elem->m_text << "\n";
        }

    }
    ASSERT_EQ(1, tokens.size());
    EXPECT_EQ(1, Count(TermID::LOCAL)) << Dump();
    EXPECT_STREQ("$alpha", tokens[0]->getText().c_str());


    ASSERT_EQ(2, TokenParse("буквы ещёЁ_99"));
    EXPECT_EQ(2, Count(TermID::NAME));
    EXPECT_STREQ("буквы", tokens[0]->getText().c_str());
    EXPECT_STREQ("ещёЁ_99", tokens[1]->getText().c_str());

    ASSERT_EQ(5, TokenParse("one \\two \\\\two \t $three @four")) << Dump();
    EXPECT_EQ(1, Count(TermID::NAME)) << Dump();
    EXPECT_EQ(1, Count(TermID::LOCAL)) << Dump();
    EXPECT_EQ(1, Count(TermID::MACRO)) << Dump();
    EXPECT_EQ(2, Count(TermID::MODULE)) << Dump();

    EXPECT_STREQ("one", tokens[0]->getText().c_str()) << tokens[0]->getText().c_str();
    EXPECT_STREQ("\\two", tokens[1]->getText().c_str()) << tokens[1]->getText().c_str();
    EXPECT_STREQ("\\\\two", tokens[2]->getText().c_str()) << tokens[2]->getText().c_str();
    EXPECT_STREQ("$three", tokens[3]->getText().c_str()) << tokens[3]->getText().c_str();
    EXPECT_STREQ("@four", tokens[4]->getText().c_str()) << tokens[4]->getText().c_str();
}

TEST_F(Lexer, AssignEq) {
    ASSERT_EQ(3, TokenParse("token=ssssssss"));
    EXPECT_EQ(2, Count(TermID::NAME));
    EXPECT_EQ(1, Count(TermID::SYMBOL));

    EXPECT_STREQ("token", tokens[0]->getText().c_str()) << tokens[0]->getText();
    EXPECT_STREQ("ssssssss", tokens[2]->getText().c_str()) << tokens[2]->getText();

    ASSERT_EQ(3, TokenParse("token:=\"ssssssss\""));
    EXPECT_EQ(1, Count(TermID::NAME));
    EXPECT_EQ(1, Count(TermID::CREATE_OVERLAP));
    EXPECT_EQ(1, Count(TermID::STRWIDE));

    EXPECT_STREQ("token", tokens[0]->getText().c_str()) << tokens[0]->getText();
    EXPECT_STREQ("ssssssss", tokens[2]->getText().c_str()) << tokens[2]->getText();

    ASSERT_EQ(3, TokenParse(" token \t ::= 'ssssssss' "));
    EXPECT_EQ(1, Count(TermID::NAME));
    EXPECT_EQ(1, Count(TermID::CREATE_ONCE));
    EXPECT_EQ(1, Count(TermID::STRCHAR));

    EXPECT_STREQ("token", tokens[0]->getText().c_str()) << tokens[0]->getText();
    EXPECT_STREQ("ssssssss", tokens[2]->getText().c_str()) << tokens[2]->getText();
}

TEST_F(Lexer, CodeInner) {
    ASSERT_EQ(3, TokenParse("{%if(){%} {%}else{%} {%} %}"));
    EXPECT_EQ(3, Count(TermID::EMBED));
    EXPECT_STREQ("if(){", tokens[0]->getText().c_str()) << tokens[0]->getText().c_str();
    EXPECT_STREQ("}else{", tokens[1]->getText().c_str()) << tokens[1]->getText().c_str();
    EXPECT_STREQ("} ", tokens[2]->getText().c_str()) << tokens[2]->getText().c_str();

    ASSERT_EQ(5, TokenParse("{ {%if(){%} {%}else{%} {%} %} }"));
    EXPECT_EQ(2, Count(TermID::SYMBOL));
    EXPECT_EQ(3, Count(TermID::EMBED));
    EXPECT_STREQ("{", tokens[0]->getText().c_str()) << tokens[0]->getText().c_str();
    EXPECT_STREQ("if(){", tokens[1]->getText().c_str()) << tokens[1]->getText().c_str();
    EXPECT_STREQ("}else{", tokens[2]->getText().c_str()) << tokens[2]->getText().c_str();
    EXPECT_STREQ("} ", tokens[3]->getText().c_str()) << tokens[3]->getText().c_str();
    EXPECT_STREQ("}", tokens[4]->getText().c_str()) << tokens[4]->getText().c_str();
}

TEST_F(Lexer, Code) {
    ASSERT_EQ(2, TokenParse("{ }"));
    EXPECT_EQ(2, Count(TermID::SYMBOL));
    EXPECT_STREQ("{", tokens[0]->getText().c_str()) << tokens[0]->getText().c_str();
    EXPECT_STREQ("}", tokens[1]->getText().c_str()) << tokens[1]->getText().c_str();

    ASSERT_EQ(4, TokenParse("{ { } }"));
    EXPECT_EQ(4, Count(TermID::SYMBOL));
}

TEST_F(Lexer, CodeSource) {
    ASSERT_EQ(1, TokenParse("{%%}"));
    EXPECT_EQ(1, Count(TermID::EMBED));
    EXPECT_STREQ("", tokens[0]->getText().c_str()) << tokens[0]->getText().c_str();

    ASSERT_EQ(1, TokenParse("{% % %}"));
    ASSERT_EQ(1, Count(TermID::EMBED));
    ASSERT_STREQ(" % ", tokens[0]->getText().c_str()) << tokens[0]->getText().c_str();
}

TEST_F(Lexer, Assign) {
    ASSERT_EQ(5, TokenParse(":= :- ::= ::- ="));
    EXPECT_EQ(1, Count(TermID::CREATE_ONCE));
    EXPECT_EQ(1, Count(TermID::CREATE_OVERLAP));
    EXPECT_EQ(1, Count(TermID::PURE_ONCE));
    EXPECT_EQ(1, Count(TermID::PURE_OVERLAP));
}

TEST_F(Lexer, Function) {
    ASSERT_EQ(1, TokenParse("\\name")) << Dump();
    EXPECT_EQ(1, Count(TermID::MODULE)) << toString(tokens[0]->getTermID());
    EXPECT_STREQ("\\name", tokens[0]->getText().c_str()) << tokens[0]->getText().c_str();

    ASSERT_EQ(1, TokenParse("\\\\name"));
    EXPECT_EQ(1, Count(TermID::MODULE)) << toString(tokens[0]->getTermID());
    EXPECT_STREQ("\\\\name", tokens[0]->getText().c_str()) << tokens[0]->getText().c_str();

    ASSERT_EQ(1, TokenParse("$name"));
    EXPECT_EQ(1, Count(TermID::LOCAL)) << toString(tokens[0]->getTermID());
    EXPECT_STREQ("$name", tokens[0]->getText().c_str()) << tokens[0]->getText().c_str();

    ASSERT_EQ(2, TokenParse("%native"));
    EXPECT_EQ(1, Count(TermID::SYMBOL)) << toString(tokens[0]->getTermID());
    EXPECT_EQ(1, Count(TermID::NAME)) << toString(tokens[1]->getTermID());
    EXPECT_STREQ("%", tokens[0]->getText().c_str()) << tokens[0]->getText().c_str();
    EXPECT_STREQ("native", tokens[1]->getText().c_str()) << tokens[1]->getText().c_str();

    ASSERT_EQ(1, TokenParse("@name"));
    EXPECT_EQ(1, Count(TermID::MACRO));
    EXPECT_STREQ("@name", tokens[0]->getText().c_str()) << tokens[0]->getText().c_str();

    ASSERT_EQ(1, TokenParse("@функция_alpha_ёЁ"));
    EXPECT_EQ(1, Count(TermID::MACRO));
    EXPECT_STREQ("@функция_alpha_ёЁ", tokens[0]->getText().c_str()) << tokens[0]->getText().c_str();
}

TEST_F(Lexer, Sentence) {
    ASSERT_EQ(2, TokenParse("token."));
    EXPECT_EQ(1, Count(TermID::NAME));
    ASSERT_EQ(2, TokenParse("token;"));
    EXPECT_EQ(1, Count(TermID::NAME));
}

TEST_F(Lexer, Comment) {
    ASSERT_EQ(0, TokenParse("# lskdafj ldsjf ldkjfa l;sdj fl;k"));
    ASSERT_EQ(0, TokenParse("#! lskdafj ldsjf ldkjfa l;sdj fl;k\n "));

    ASSERT_EQ(1, TokenParse("/***** lskdafj\n\n\n\n ldsjf ldkjfa l;sdj fl;k*****/ "));
    EXPECT_EQ(1, Count(TermID::DOC_BEFORE));

    ASSERT_EQ(1, TokenParse("/// lskdafj"));
    EXPECT_EQ(1, Count(TermID::DOC_BEFORE));
    ASSERT_EQ(1, TokenParse("/// lskdafj\n"));
    EXPECT_EQ(1, Count(TermID::DOC_BEFORE));
    ASSERT_EQ(1, TokenParse("///< lskdafj"));
    EXPECT_EQ(1, Count(TermID::DOC_AFTER));
    ASSERT_EQ(1, TokenParse("///< lskdafj\n"));
    EXPECT_EQ(1, Count(TermID::DOC_AFTER));
}

TEST_F(Lexer, Comment2) {
    ASSERT_EQ(2, TokenParse("#!22\n#1\nterm;\n"));
    // EXPECT_EQ(2, Count(TermID::COMMENT));
    EXPECT_EQ(1, Count(TermID::SYMBOL));
    EXPECT_EQ(1, Count(TermID::NAME));

    ASSERT_EQ(1, TokenParse("\n\n/* \n\n*/\n\n term"));
    // EXPECT_EQ(1, Count(TermID::COMMENT));
    EXPECT_EQ(1, Count(TermID::NAME));
    // EXPECT_STREQ(" \n\n", tokens[0]->getText().c_str()) << tokens[0]->getText().c_str();
    // EXPECT_EQ(5, tokens[0]->m_line);
    // EXPECT_EQ(1, tokens[0]->m_col);
    EXPECT_STREQ("term", tokens[0]->getText().c_str()) << tokens[0]->getText().c_str();
    EXPECT_EQ(7, tokens[0]->m_line) << Dump();
    EXPECT_EQ(7, tokens[0]->m_col) << Dump();
}

TEST_F(Lexer, Paren) {
    ASSERT_EQ(3, TokenParse("\\name()")) << Dump();
    EXPECT_EQ(1, Count(TermID::MODULE));
    EXPECT_EQ(2, Count(TermID::SYMBOL));


    ASSERT_EQ(4, TokenParse("%функция_alpha_ёЁ ()"));
    EXPECT_EQ(1, Count(TermID::NAME));
    EXPECT_EQ(3, Count(TermID::SYMBOL));

}

TEST_F(Lexer, Module) {
    ASSERT_EQ(1, TokenParse("\\name")) << Dump();
    EXPECT_EQ(1, Count(TermID::MODULE));

    ASSERT_EQ(1, TokenParse("\\\\dir\\module"));
    EXPECT_EQ(1, Count(TermID::MODULE));

    ASSERT_EQ(1, TokenParse("\\dir\\dir\\module"));
    EXPECT_EQ(1, Count(TermID::MODULE));

    ASSERT_EQ(3, TokenParse("\\name::var")) << Dump();
    EXPECT_EQ(1, Count(TermID::MODULE));

    ASSERT_EQ(5, TokenParse("\\\\dir\\module::var.filed")) << Dump();
    EXPECT_EQ(1, Count(TermID::MODULE));

    ASSERT_EQ(5, TokenParse("\\dir\\dir\\module::var.filed")) << Dump();
    EXPECT_EQ(1, Count(TermID::MODULE));
}

TEST_F(Lexer, Arg) {
    ASSERT_EQ(7, TokenParse("term(name=value);"));
    EXPECT_EQ(3, Count(TermID::NAME));
    EXPECT_EQ(4, Count(TermID::SYMBOL));
}

TEST_F(Lexer, Args) {
    ASSERT_EQ(11, TokenParse("$0 $1 $22 $333 $4sss $sss1 -- ++ $* $^ ")) << Dump();
    EXPECT_EQ(5, Count(TermID::ARGUMENT)) << Dump();
    EXPECT_EQ(2, Count(TermID::ARGS)) << Dump();
    EXPECT_EQ(1, Count(TermID::INT_PLUS)) << Dump();
    EXPECT_EQ(1, Count(TermID::INT_MINUS)) << Dump();
    EXPECT_EQ(1, Count(TermID::NAME)) << Dump();
    EXPECT_EQ(1, Count(TermID::LOCAL)) << Dump();
}

TEST_F(Lexer, UTF8) {
    ASSERT_EQ(7, TokenParse("термин(имя=значение);"));
    EXPECT_EQ(3, Count(TermID::NAME)) << Dump();
    EXPECT_EQ(4, Count(TermID::SYMBOL)) << Dump();
}

TEST_F(Lexer, ELLIPSIS) {
    ASSERT_EQ(2, TokenParse("... ...")) << Dump();
    EXPECT_EQ(2, Count(TermID::ELLIPSIS)) << Dump();
}

TEST_F(Lexer, Alias) {
    ASSERT_EQ(5, TokenParse("+>:<-")) << Dump();
    EXPECT_EQ(5, Count(TermID::SYMBOL)) << Dump();

    ASSERT_EQ(4, TokenParse("@alias := @ALIAS;")) << Dump();
    EXPECT_EQ(2, Count(TermID::MACRO)) << Dump();

    ASSERT_EQ(7, TokenParse("/** Comment */@@ alias2 @@ ALIAS2@@///< Комментарий")) << Dump();
    EXPECT_EQ(1, Count(TermID::DOC_BEFORE));
    EXPECT_EQ(1, Count(TermID::DOC_AFTER));
    EXPECT_EQ(2, Count(TermID::NAME));
    EXPECT_EQ(1, tokens[0]->m_line) << Dump();
    EXPECT_EQ(15, tokens[0]->m_col) << Dump();
    EXPECT_EQ(1, tokens[1]->m_line) << Dump();
    EXPECT_EQ(17, tokens[1]->m_col) << Dump();

    ASSERT_EQ(2, TokenParse("/** Русские символы */name")) << Dump();
    EXPECT_EQ(1, Count(TermID::DOC_BEFORE));
    EXPECT_EQ(1, Count(TermID::NAME));
    EXPECT_EQ(1, tokens[0]->m_line);
    EXPECT_EQ(37, tokens[0]->m_col);
    EXPECT_EQ(1, tokens[1]->m_line);
    EXPECT_EQ(23 + 14 + 4, tokens[1]->m_col);
}

TEST_F(Lexer, Macro) {

    ASSERT_EQ(1, TokenParse("@$arg")) << Dump();
    EXPECT_EQ(1, Count(TermID::MACRO_ARGNAME)) << Dump();

    ASSERT_EQ(1, TokenParse("@$1")) << Dump();
    EXPECT_EQ(1, Count(TermID::MACRO_ARGPOS)) << Dump();

    // ASSERT_EQ(1, TokenParse("@$name(*)")) << Dump();
    // EXPECT_EQ(1, Count(TermID::MACRO_ARGUMENT));
    // ASSERT_EQ(1, TokenParse("@$name[*]")) << Dump();
    // EXPECT_EQ(1, Count(TermID::MACRO_ARGUMENT));
    // ASSERT_EQ(1, TokenParse("@$name<*>")) << Dump();
    // EXPECT_EQ(1, Count(TermID::MACRO_ARGUMENT));
    //
    // ASSERT_EQ(1, TokenParse("@$name(#)")) << Dump();
    // EXPECT_EQ(1, Count(TermID::MACRO_ARGCOUNT));
    // ASSERT_EQ(1, TokenParse("@$name[#]")) << Dump();
    // EXPECT_EQ(1, Count(TermID::MACRO_ARGCOUNT));
    // ASSERT_EQ(1, TokenParse("@$name<#>")) << Dump();
    // EXPECT_EQ(1, Count(TermID::MACRO_ARGCOUNT));


    ASSERT_EQ(1, TokenParse("@#")) << Dump();
    EXPECT_EQ(1, Count(TermID::MACRO_TOSTR));

    ASSERT_EQ(1, TokenParse("@#'")) << Dump();
    EXPECT_EQ(1, Count(TermID::MACRO_TOSTR));
    ASSERT_EQ(1, TokenParse("@#\"")) << Dump();
    EXPECT_EQ(1, Count(TermID::MACRO_TOSTR));

    ASSERT_EQ(1, TokenParse("@##")) << Dump();
    EXPECT_EQ(1, Count(TermID::MACRO_CONCAT));

    ASSERT_EQ(1, TokenParse("@$...")) << Dump();
    EXPECT_EQ(1, Count(TermID::MACRO_ARGUMENT));
    ASSERT_EQ(1, TokenParse("@$*")) << Dump();
    EXPECT_EQ(1, Count(TermID::MACRO_ARGUMENT));
    ASSERT_EQ(1, TokenParse("@$#")) << Dump();
    EXPECT_EQ(1, Count(TermID::MACRO_ARGCOUNT));

    ASSERT_EQ(7, TokenParse("@macro := @@123 ... 456@@")) << Dump();
    EXPECT_EQ(1, Count(TermID::MACRO)) << Dump();
    EXPECT_EQ(2, Count(TermID::MACRO_SEQ)) << Dump();

    ASSERT_EQ(3, TokenParse("@macro := @@@123 ... 456@@@"));
    EXPECT_EQ(1, Count(TermID::MACRO));
    EXPECT_EQ(1, Count(TermID::MACRO_STR));
    EXPECT_STREQ("@macro", tokens[0]->m_text.c_str());
    EXPECT_STREQ("123 ... 456", tokens[2]->m_text.c_str());
    EXPECT_EQ(1, tokens[0]->m_line) << Dump();
    EXPECT_EQ(7, tokens[0]->m_col) << Dump();
    EXPECT_EQ(1, tokens[2]->m_line) << Dump();
    EXPECT_EQ(28, tokens[2]->m_col) << Dump();

    ASSERT_EQ(6, TokenParse("@macro (name) := @@@123 \n \n ... 456@@@ # Комментарий"));
    EXPECT_EQ(1, Count(TermID::NAME));
    EXPECT_EQ(1, Count(TermID::MACRO));
    EXPECT_EQ(2, Count(TermID::SYMBOL));
    EXPECT_EQ(1, Count(TermID::MACRO_STR));
    EXPECT_STREQ("@macro", tokens[0]->m_text.c_str());
    EXPECT_STREQ("123 \n \n ... 456", tokens[5]->m_text.c_str());
    EXPECT_EQ(1, tokens[0]->m_line);
    EXPECT_EQ(7, tokens[0]->m_col);
    EXPECT_EQ(3, tokens[5]->m_line);
    EXPECT_EQ(12, tokens[5]->m_col);

    ASSERT_EQ(11, TokenParse("@if($args) := @@ [@$args] --> @@")) << Dump();
    EXPECT_EQ(1, Count(TermID::MACRO));
    EXPECT_EQ(4, Count(TermID::SYMBOL));
    EXPECT_EQ(1, Count(TermID::CREATE_OVERLAP));
    EXPECT_EQ(2, Count(TermID::MACRO_SEQ));
    EXPECT_EQ(1, Count(TermID::FOLLOW));
    EXPECT_EQ(1, Count(TermID::MACRO_ARGNAME)) << Dump();
}

TEST_F(Lexer, Ignore) {
    ASSERT_EQ(1, TokenParse("\\\\ ")) << Dump();
    ASSERT_EQ(1, Count(TermID::MODULE)) << Dump();
    EXPECT_STREQ("\\\\", tokens[0]->m_text.c_str());

    ASSERT_EQ(2, TokenParse("\\\\ ", false)) << Dump();
    ASSERT_EQ(1, Count(TermID::MODULE)) << Dump();
    ASSERT_EQ(1, Count(TermID::SPACE)) << Dump();
    EXPECT_STREQ("\\\\", tokens[0]->m_text.c_str());
    EXPECT_STREQ(" ", tokens[1]->m_text.c_str());

    ASSERT_EQ(2, TokenParse("\\\\ \t ", false)) << Dump();
    ASSERT_EQ(1, Count(TermID::MODULE)) << Dump();
    ASSERT_EQ(1, Count(TermID::SPACE)) << Dump();
    EXPECT_STREQ("\\\\", tokens[0]->m_text.c_str());
    EXPECT_STREQ(" \t ", tokens[1]->m_text.c_str());


    ASSERT_EQ(2, TokenParse(" \\\\ \t \n", false)) << Dump();
    ASSERT_EQ(1, Count(TermID::MODULE)) << Dump();
    ASSERT_EQ(1, Count(TermID::SPACE)) << Dump();
    EXPECT_STREQ("\\\\", tokens[0]->m_text.c_str());
    EXPECT_STREQ(" \t ", tokens[1]->m_text.c_str());


    ASSERT_EQ(5, TokenParse(" \\\\ \t \n\t\t", false, false, false, false)) << Dump();
    ASSERT_EQ(1, Count(TermID::MODULE)) << Dump();
    ASSERT_EQ(1, Count(TermID::SPACE)) << Dump();
    ASSERT_EQ(2, Count(TermID::INDENT)) << Dump();
    ASSERT_EQ(1, Count(TermID::CRLF)) << Dump();
    EXPECT_STREQ(" ", tokens[0]->m_text.c_str());
    EXPECT_STREQ("\\\\", tokens[1]->m_text.c_str());
    EXPECT_STREQ(" \t ", tokens[2]->m_text.c_str());
    EXPECT_STREQ("\n", tokens[3]->m_text.c_str());
    EXPECT_STREQ("\t\t", tokens[4]->m_text.c_str());

    ASSERT_EQ(4, TokenParse("/* /* */ */ # \n", false, false, false, false)) << Dump();
    ASSERT_EQ(1, Count(TermID::SPACE)) << Dump();
    ASSERT_EQ(2, Count(TermID::COMMENT)) << Dump();
    ASSERT_EQ(1, Count(TermID::CRLF)) << Dump();
    EXPECT_STREQ("/* /* */ */", tokens[0]->m_text.c_str());
    EXPECT_STREQ(" ", tokens[1]->m_text.c_str());
    EXPECT_STREQ("# ", tokens[2]->m_text.c_str());
    EXPECT_STREQ("\n", tokens[3]->m_text.c_str());

}

TEST_F(Lexer, ParseLexem) {

    BlockType arr = Scanner::ParseLexem("1 2 3 4 5");

    ASSERT_EQ(5, arr.size()) << Macro::DumpText(arr).c_str();
    ASSERT_STREQ("1 2 3 4 5", Macro::DumpText(arr).c_str());

    arr = Scanner::ParseLexem("macro @test(1,2,3,...):type; next \n; # sssssss\n @only lexem((((;; ;");
    ASSERT_STREQ("macro @test ( 1 , 2 , 3 , ... ) : type ; next ; @only lexem ( ( ( ( ; ; ;", Macro::DumpText(arr).c_str());
}

#endif // BUILD_UNITTEST
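
The listing above only defines the test cases; it contains no entry point. As a minimal sketch of how such a GoogleTest suite is normally driven (the NewLang build most likely links gtest_main or its own runner, so this block is illustrative and not part of lexer_test.cpp):

// Hypothetical standalone runner, assuming only the standard GoogleTest API.
#include <gtest/gtest.h>

int main(int argc, char **argv) {
    ::testing::InitGoogleTest(&argc, argv);   // consume --gtest_* command-line options
    return RUN_ALL_TESTS();                   // execute every registered TEST_F case
}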