module source.dlexer;

import source.context;
import source.location;

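/**
 * The type of a token produced by the lexer.
 *
 * The ordering matters: Token.toString below prints the interned name
 * for any type at or past Identifier, and falls back to the source
 * slice for everything before it.
 */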
enum TokenType {
	Invalid = 0,
	
	Begin,
	End,
	
	// Comments.
	Comment,
	
	// Literals.
	StringLiteral,
	CharacterLiteral,
	IntegerLiteral,
	FloatLiteral,
	
	// Identifier.
	Identifier,
	
	// Keywords.
	Abstract, Alias, Align, Asm, Assert, Auto,
	Body, Bool, Break, Byte,
	Case, Cast, Catch, Cdouble, Cent, Cfloat, Char,
	Class, Const, Continue, Creal,
	Dchar, Debug, Default, Delegate, Delete,
	Deprecated, Do, Double,
	Else, Enum, Export, Extern,
	False, Final, Finally, Float, For, Foreach,
	ForeachReverse, Function,
	Goto,
	Idouble, If, Ifloat, Immutable, Import, In,
	Inout, Int, Interface, Invariant, Ireal, Is,
	Lazy, Long,
	Macro, Mixin, Module,
	New, Nothrow, Null,
	Out, Override,
	Package, Pragma, Private, Protected, Public, Pure,
	Real, Ref, Return,
	Scope, Shared, Short, Static, Struct, Super,
	Switch, Synchronized,
	Template, This, Throw, True, Try, Typedef,
	Typeid, Typeof,
	Ubyte, Ucent, Uint, Ulong, Union, Unittest, Ushort,
	Version, Void, Volatile,
	Wchar, While, With,
	__File__, __Line__, __Gshared, __Traits, __Vector, __Parameters,
	
	// Operators.
	Slash,              // /
	SlashEqual,         // /=
	Dot,                // .
	DotDot,             // ..
	DotDotDot,          // ...
	Ampersand,          // &
	AmpersandEqual,     // &=
	AmpersandAmpersand, // &&
	Pipe,               // |
	PipeEqual,          // |=
	PipePipe,           // ||
	Minus,              // -
	MinusEqual,         // -=
	MinusMinus,         // --
	Plus,               // +
	PlusEqual,          // +=
	PlusPlus,           // ++
	Less,               // <
	LessEqual,          // <=
	LessLess,           // <<
	LessLessEqual,      // <<=
	LessMore,           // <>
	LessMoreEqual,      // <>=
	More,               // >
	MoreEqual,          // >=
	MoreMoreEqual,      // >>=
	MoreMoreMoreEqual,  // >>>=
	MoreMore,           // >>
	MoreMoreMore,       // >>>
	Bang,               // !
	BangEqual,          // !=
	BangLessMore,       // !<>
	BangLessMoreEqual,  // !<>=
	BangLess,           // !<
	BangLessEqual,      // !<=
	BangMore,           // !>
	BangMoreEqual,      // !>=
	OpenParen,          // (
	CloseParen,         // )
	OpenBracket,        // [
	CloseBracket,       // ]
	OpenBrace,          // {
	CloseBrace,         // }
	QuestionMark,       // ?
	Comma,              // ,
	Semicolon,          // ;
	Colon,              // :
	Dollar,             // $
	Equal,              // =
	EqualEqual,         // ==
	Star,               // *
	StarEqual,          // *=
	Percent,            // %
	PercentEqual,       // %=
	Caret,              // ^
	CaretEqual,         // ^=
	CaretCaret,         // ^^
	CaretCaretEqual,    // ^^=
	Tilde,              // ~
	TildeEqual,         // ~=
	At,                 // @
	EqualMore,          // =>
	Hash,               // #
}

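/**
 * A token: its location in the source, its type, and an interned name
 * carrying its text when applicable (identifier spelling, decoded
 * string literal value, ...).
 */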
struct Token {
	import source.location;
	Location location;
	
	TokenType type;
	
	import source.name;
	Name name;
	
	import source.context;
	string toString(Context context) {
		return (type >= TokenType.Identifier)
			? name.toString(context)
			: location.getFullLocation(context).getSlice();
	}
}

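/**
 * Build a token range over the source starting at base. The range
 * starts on a Begin token whose location covers any leading #! line,
 * so consumers pop it first (see lex.match(TokenType.Begin) in the
 * unittests below).
 */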
auto lex(Position base, Context context) {
	auto lexer = TokenRange();
	
	lexer.content = base.getFullPosition(context).getSource().getContent();
	lexer.t.type = TokenType.Begin;
	
	lexer.context = context;
	lexer.base = base;
	lexer.previous = base;
	
	// Pop #!
	lexer.popSheBang();
	
	lexer.t.location = Location(base, base.getWithOffset(lexer.index));
	return lexer;
}

alias TokenRange = DLexer;

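/**
 * The D lexer itself. Most of the machinery comes from the mixins in
 * source.lexbase, source.lexnumeric and source.lexstring; this struct
 * provides the dispatch tables and the D specific string forms.
 *
 * In BaseMap, a plain value names the method that lexes that prefix.
 * The "?tokenizeComments:lexComment|popComment" form presumably selects
 * lexComment or popComment depending on a tokenizeComments flag; that
 * reading is an assumption, as the syntax is interpreted by LexBaseImpl.
 */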
struct DLexer {
	enum BaseMap = () {
		auto ret = [
			// Comments.
			"//" : "?tokenizeComments:lexComment|popComment",
			"/*" : "?tokenizeComments:lexComment|popComment",
			"/+" : "?tokenizeComments:lexComment|popComment",
			
			// Integer literals.
			"0b" : "lexNumeric",
			"0B" : "lexNumeric",
			"0x" : "lexNumeric",
			"0X" : "lexNumeric",
			
			// String literals.
			"`"   : "lexString",
			`"`   : "lexString",
			"q{"  : "lexDString",
			`q"`  : "lexDString",
			`q"(` : "lexDString",
			`q"[` : "lexDString",
			`q"{` : "lexDString",
			`q"<` : "lexDString",
			`r"`  : "lexDString",
			
			// Character literals.
			"'" : "lexCharacter",
		];
		
		foreach (i; 0 .. 10) {
			import std.conv;
			ret[to!string(i)] = "lexNumeric";
		}
		
		return ret;
	}();
	
	import source.lexbase;
	mixin LexBaseImpl!(Token, BaseMap, getKeywordsMap(), getOperatorsMap());
	
	void popSheBang() {
		auto c = frontChar;
		if (c == '#') {
			// Stop at '\0' as well, so an unterminated #! line
			// cannot scan past the end of the buffer.
			while (c != '\n' && c != '\0') {
				popChar();
				c = frontChar;
			}
		}
	}
	
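	// Numeric literals: the first map lists the accepted integer
	// suffixes, the second the float suffixes. The last map routes a
	// lowercase 'l' float suffix to lexFloatSuffixError below, which
	// rejects it in favor of 'L'.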
	import source.lexnumeric;
	mixin LexNumericImpl!(Token, [
		"" : TokenType.IntegerLiteral,
		"u": TokenType.IntegerLiteral,
		"U": TokenType.IntegerLiteral,
		"ul": TokenType.IntegerLiteral,
		"uL": TokenType.IntegerLiteral,
		"Ul": TokenType.IntegerLiteral,
		"UL": TokenType.IntegerLiteral,
		"l": TokenType.IntegerLiteral,
		"L": TokenType.IntegerLiteral,
		"lu": TokenType.IntegerLiteral,
		"lU": TokenType.IntegerLiteral,
		"Lu": TokenType.IntegerLiteral,
		"LU": TokenType.IntegerLiteral,
		"f": TokenType.FloatLiteral,
		"F": TokenType.FloatLiteral,
	], [
		"" : TokenType.FloatLiteral,
		"f": TokenType.FloatLiteral,
		"F": TokenType.FloatLiteral,
		"L": TokenType.FloatLiteral,
	], null, [
		"l": "lexFloatSuffixError",
	]);
	
	auto lexFloatSuffixError(string s : "l")(uint begin, uint prefixStart) {
		return getError(begin, "Use 'L' suffix instead of 'l'.");
	}
	
	import source.lexstring;
	mixin LexStringImpl!(Token, [
		"" : TokenType.StringLiteral,
		"c" : TokenType.StringLiteral,
		"w" : TokenType.StringLiteral,
		"d" : TokenType.StringLiteral,
	]);
	
	Token lexDString(string s : `r"`)() {
		uint l = s.length;
		return lexRawString!'"'(index - l);
	}
	
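	// Token strings: q{ ... } is lexed by running a lookahead lexer over
	// the contents and tracking brace depth, so braces may nest. Because
	// the body is re-lexed token by token, an invalid token inside the
	// literal bubbles up as the result.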
	Token lexDString(string s : "q{")() {
		uint begin = index - 2;
		uint start = index;
		
		auto lookahead = getLookahead();
		
		uint level = 1;
		while (level > 0) {
			lookahead.popFront();
			auto lt = lookahead.front;
			
			switch (lt.type) with (TokenType) {
				case Invalid:
					// Bubble up errors.
					index = lookahead.index;
					return lt;
				
				case End:
					index = lookahead.index - 1;
					return getError(begin, "Unexpected end of file.");
				
				case OpenBrace:
					level++;
					break;
				
				case CloseBrace:
					level--;
					break;
				
				default:
					break;
			}
		}
		
		index = lookahead.index;
		return buildRawString(begin, start, index - 1);
	}

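	// Shared helper for the bracket delimited forms q"( q"[ q"{ q"<:
	// scan until the closing delimiter immediately followed by '"'. It
	// simply looks for the first such pair; no nesting is tracked.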
	Token lexQDelimitedString(char delimiter) in {
		assert(delimiter != '"');
	} do {
		uint begin = index - 3;
		uint start = index;

		// Seeding previous with the current character is not technically
		// correct, but its actual value doesn't matter as long as the
		// delimiter isn't '"', which the contract guarantees.
		char previous = frontChar;
		char c = previous;

		while (c != '\0' && (c != '"' || previous != delimiter)) {
			popChar();
			previous = c;
			c = frontChar;
		}

		if (c == '\0') {
			return getError(begin, "Unexpected end of file.");
		}

		popChar();
		return buildRawString(begin, start, index - 2);
	}

	Token lexDString(string s : `q"(`)() {
		return lexQDelimitedString(')');
	}

	Token lexDString(string s : `q"[`)() {
		return lexQDelimitedString(']');
	}

	Token lexDString(string s : `q"{`)() {
		return lexQDelimitedString('}');
	}

	Token lexDString(string s : `q"<`)() {
		return lexQDelimitedString('>');
	}

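	// Identifier delimited strings: q"ID must be followed by a newline,
	// and the literal runs until a line starting with ID immediately
	// followed by '"'. The body is kept verbatim, including its final
	// newline, as the unittests below check.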
	Token lexDString(string s : `q"`)() {
		uint idstart = index;
		uint begin = index - 2;

		Token t = lexIdentifier();
		if (t.type == TokenType.Invalid) {
			// If this is an error, pass it on!
			return t;
		}

		auto id = content[idstart .. index];

		if (frontChar == '\r') {
			// Be nice to the Windows minions out there.
			popChar();
		}

		if (frontChar != '\n') {
			return getError(begin, "Identifier must be followed by a new line.");
		}

		popChar();

		uint start = index;
		char c = frontChar;

		// Skip the initial chars where a match is not possible.
		for (size_t i = 0; c != '\0' && i < id.length; i++) {
			popChar();
			c = frontChar;
		}

		OuterLoop: while (true) {
			while (c != '\0' && c != '"') {
				popChar();
				c = frontChar;
			}

			if (c == '\0') {
				return getError(begin, "Unexpected end of file.");
			}

			scope(success) {
				popChar();
			}

			if (content[index - id.length - 1] != '\n') {
				continue;
			}

			for (size_t i = 0; i < id.length; i++) {
				if (content[index - id.length + i] != id[i]) {
					// Not our delimiter; a plain continue would only
					// restart this for loop, hence the label.
					continue OuterLoop;
				}
			}

			// We found our guy.
			break;
		}

		return buildRawString(begin, start, index - id.length - 1);
	}
}

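// Tables mapping source text to token types, consumed by LexBaseImpl.
// Presumably the operator table drives a longest-match dispatch on
// prefixes; note the "\0" entry, which turns the terminating null byte
// into an End token.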
auto getOperatorsMap() {
	// with(TokenType): currently isn't working, see https://issues.dlang.org/show_bug.cgi?id=14332
	with(TokenType)
	return [
		"/"    : Slash,
		"/="   : SlashEqual,
		"."    : Dot,
		".."   : DotDot,
		"..."  : DotDotDot,
		"&"    : Ampersand,
		"&="   : AmpersandEqual,
		"&&"   : AmpersandAmpersand,
		"|"    : Pipe,
		"|="   : PipeEqual,
		"||"   : PipePipe,
		"-"    : Minus,
		"-="   : MinusEqual,
		"--"   : MinusMinus,
		"+"    : Plus,
		"+="   : PlusEqual,
		"++"   : PlusPlus,
		"<"    : Less,
		"<="   : LessEqual,
		"<<"   : LessLess,
		"<<="  : LessLessEqual,
		"<>"   : LessMore,
		"<>="  : LessMoreEqual,
		">"    : More,
		">="   : MoreEqual,
		">>="  : MoreMoreEqual,
		">>>=" : MoreMoreMoreEqual,
		">>"   : MoreMore,
		">>>"  : MoreMoreMore,
		"!"    : Bang,
		"!="   : BangEqual,
		"!<>"  : BangLessMore,
		"!<>=" : BangLessMoreEqual,
		"!<"   : BangLess,
		"!<="  : BangLessEqual,
		"!>"   : BangMore,
		"!>="  : BangMoreEqual,
		"("    : OpenParen,
		")"    : CloseParen,
		"["    : OpenBracket,
		"]"    : CloseBracket,
		"{"    : OpenBrace,
		"}"    : CloseBrace,
		"?"    : QuestionMark,
		","    : Comma,
		";"    : Semicolon,
		":"    : Colon,
		"$"    : Dollar,
		"="    : Equal,
		"=="   : EqualEqual,
		"*"    : Star,
		"*="   : StarEqual,
		"%"    : Percent,
		"%="   : PercentEqual,
		"^"    : Caret,
		"^="   : CaretEqual,
		"^^"   : CaretCaret,
		"^^="  : CaretCaretEqual,
		"~"    : Tilde,
		"~="   : TildeEqual,
		"@"    : At,
		"=>"   : EqualMore,
		"#"    : Hash,
		"\0"   : End,
	];
}

auto getKeywordsMap() {
	// with(TokenType): currently isn't working, see https://issues.dlang.org/show_bug.cgi?id=14332
	with(TokenType)
	return [
		"abstract"        : Abstract,
		"alias"           : Alias,
		"align"           : Align,
		"asm"             : Asm,
		"assert"          : Assert,
		"auto"            : Auto,
		"body"            : Body,
		"bool"            : Bool,
		"break"           : Break,
		"byte"            : Byte,
		"case"            : Case,
		"cast"            : Cast,
		"catch"           : Catch,
		"cent"            : Cent,
		"char"            : Char,
		"class"           : Class,
		"const"           : Const,
		"continue"        : Continue,
		"dchar"           : Dchar,
		"debug"           : Debug,
		"default"         : Default,
		"delegate"        : Delegate,
		"deprecated"      : Deprecated,
		"do"              : Do,
		"double"          : Double,
		"else"            : Else,
		"enum"            : Enum,
		"export"          : Export,
		"extern"          : Extern,
		"false"           : False,
		"final"           : Final,
		"finally"         : Finally,
		"float"           : Float,
		"for"             : For,
		"foreach"         : Foreach,
		"foreach_reverse" : ForeachReverse,
		"function"        : Function,
		"goto"            : Goto,
		"if"              : If,
		"immutable"       : Immutable,
		"import"          : Import,
		"in"              : In,
		"inout"           : Inout,
		"int"             : Int,
		"interface"       : Interface,
		"invariant"       : Invariant,
		"is"              : Is,
		"lazy"            : Lazy,
		"long"            : Long,
		"macro"           : Macro,
		"mixin"           : Mixin,
		"module"          : Module,
		"new"             : New,
		"nothrow"         : Nothrow,
		"null"            : Null,
		"out"             : Out,
		"override"        : Override,
		"package"         : Package,
		"pragma"          : Pragma,
		"private"         : Private,
		"protected"       : Protected,
		"public"          : Public,
		"pure"            : Pure,
		"real"            : Real,
		"ref"             : Ref,
		"return"          : Return,
		"scope"           : Scope,
		"shared"          : Shared,
		"short"           : Short,
		"static"          : Static,
		"struct"          : Struct,
		"super"           : Super,
		"switch"          : Switch,
		"synchronized"    : Synchronized,
		"template"        : Template,
		"this"            : This,
		"throw"           : Throw,
		"true"            : True,
		"try"             : Try,
		"typeid"          : Typeid,
		"typeof"          : Typeof,
		"ubyte"           : Ubyte,
		"ucent"           : Ucent,
		"uint"            : Uint,
		"ulong"           : Ulong,
		"union"           : Union,
		"unittest"        : Unittest,
		"ushort"          : Ushort,
		"version"         : Version,
		"void"            : Void,
		"volatile"        : Volatile,
		"wchar"           : Wchar,
		"while"           : While,
		"with"            : With,
		"__FILE__"        : __File__,
		"__LINE__"        : __Line__,
		"__gshared"       : __Gshared,
		"__traits"        : __Traits,
		"__vector"        : __Vector,
		"__parameters"    : __Parameters,
	];
}

unittest {
	auto context = new Context();
	
	auto testlexer(string s) {
		import source.name;
		auto base = context.registerMixin(Location.init, s ~ '\0');
		return lex(base, context);
	}
	
	import source.parserutil;
	
	{
		auto lex = testlexer("");
		lex.match(TokenType.Begin);
		assert(lex.front.type == TokenType.End);
	}
	
	{
		auto lex = testlexer("a");
		lex.match(TokenType.Begin);
		
		auto t = lex.front;
		
		assert(t.type == TokenType.Identifier);
		assert(t.name.toString(context) == "a");
		lex.popFront();
		
		assert(lex.front.type == TokenType.End);
	}
	
	{
		auto lex = testlexer("_");
		lex.match(TokenType.Begin);
		
		auto t = lex.front;
		
		assert(t.type == TokenType.Identifier);
		assert(t.name.toString(context) == "_");
		lex.popFront();
		
		assert(lex.front.type == TokenType.End);
	}
	
	{
		auto lex = testlexer("_0");
		lex.match(TokenType.Begin);
		
		auto t = lex.front;
		
		assert(t.type == TokenType.Identifier);
		assert(t.name.toString(context) == "_0");
		lex.popFront();
		
		assert(lex.front.type == TokenType.End);
	}
	
	{
		auto lex = testlexer("0b0");
		lex.match(TokenType.Begin);
		lex.match(TokenType.IntegerLiteral);
		assert(lex.front.type == TokenType.End);
	}
	
	{
		auto lex = testlexer("0b_0");
		lex.match(TokenType.Begin);
		lex.match(TokenType.IntegerLiteral);
		assert(lex.front.type == TokenType.End);
	}
	
	{
		auto lex = testlexer("0b_0_");
		lex.match(TokenType.Begin);
		lex.match(TokenType.IntegerLiteral);
		assert(lex.front.type == TokenType.End);
	}
	
	{
		auto lex = testlexer("0b_");
		lex.match(TokenType.Begin);
		lex.match(TokenType.Invalid);
		assert(lex.front.type == TokenType.End);
	}
	
	{
		auto lex = testlexer("0x0");
		lex.match(TokenType.Begin);
		lex.match(TokenType.IntegerLiteral);
		assert(lex.front.type == TokenType.End);
	}
	
	{
		auto lex = testlexer("0x_0");
		lex.match(TokenType.Begin);
		lex.match(TokenType.IntegerLiteral);
		assert(lex.front.type == TokenType.End);
	}
	
	{
		auto lex = testlexer("0x_0_");
		lex.match(TokenType.Begin);
		lex.match(TokenType.IntegerLiteral);
		assert(lex.front.type == TokenType.End);
	}
	
	{
		auto lex = testlexer("0x_");
		lex.match(TokenType.Begin);
		lex.match(TokenType.Invalid);
		assert(lex.front.type == TokenType.End);
	}
	
	{
		auto lex = testlexer("é");
		lex.match(TokenType.Begin);
		
		auto t = lex.front;
		
		assert(t.type == TokenType.Identifier);
		assert(t.name.toString(context) == "é");
		lex.popFront();
		
		assert(lex.front.type == TokenType.End);
	}
	
	{
		auto lex = testlexer("Γαῖα");
		lex.match(TokenType.Begin);
		
		auto t = lex.front;
		
		assert(t.type == TokenType.Identifier);
		assert(t.name.toString(context) == "Γαῖα");
		lex.popFront();
		
		assert(lex.front.type == TokenType.End);
	}
	
	{
		auto lex = testlexer("🙈🙉🙊");
		lex.match(TokenType.Begin);
		lex.match(TokenType.Invalid);
		lex.match(TokenType.Invalid);
		lex.match(TokenType.Invalid);
		assert(lex.front.type == TokenType.End);
	}
	
	{
		auto lex = testlexer("0");
		lex.match(TokenType.Begin);
		lex.match(TokenType.IntegerLiteral);
		assert(lex.front.type == TokenType.End);
	}
	
	{
		auto lex = testlexer("1");
		lex.match(TokenType.Begin);
		lex.match(TokenType.IntegerLiteral);
		assert(lex.front.type == TokenType.End);
	}
	
	{
		auto lex = testlexer("1.");
		lex.match(TokenType.Begin);
		lex.match(TokenType.FloatLiteral);
		assert(lex.front.type == TokenType.End);
	}
	
	{
		auto lex = testlexer("1.0");
		lex.match(TokenType.Begin);
		lex.match(TokenType.FloatLiteral);
		assert(lex.front.type == TokenType.End);
	}
	
	{
		auto lex = testlexer("1. 0");
		lex.match(TokenType.Begin);
		lex.match(TokenType.FloatLiteral);
		lex.match(TokenType.IntegerLiteral);
		assert(lex.front.type == TokenType.End);
	}
	
	{
		auto lex = testlexer("1..");
		lex.match(TokenType.Begin);
		lex.match(TokenType.IntegerLiteral);
		lex.match(TokenType.DotDot);
		assert(lex.front.type == TokenType.End);
	}
	
	{
		auto lex = testlexer("1 .");
		lex.match(TokenType.Begin);
		lex.match(TokenType.IntegerLiteral);
		lex.match(TokenType.Dot);
		assert(lex.front.type == TokenType.End);
	}
	
	{
		auto lex = testlexer("1u");
		lex.match(TokenType.Begin);
		lex.match(TokenType.IntegerLiteral);
		assert(lex.front.type == TokenType.End);
	}
	
	{
		auto lex = testlexer("1U");
		lex.match(TokenType.Begin);
		lex.match(TokenType.IntegerLiteral);
		assert(lex.front.type == TokenType.End);
	}
	
	{
		auto lex = testlexer("1l");
		lex.match(TokenType.Begin);
		lex.match(TokenType.IntegerLiteral);
		assert(lex.front.type == TokenType.End);
	}
	
	{
		auto lex = testlexer("1L");
		lex.match(TokenType.Begin);
		lex.match(TokenType.IntegerLiteral);
		assert(lex.front.type == TokenType.End);
	}
	
	{
		auto lex = testlexer("1ul");
		lex.match(TokenType.Begin);
		lex.match(TokenType.IntegerLiteral);
		assert(lex.front.type == TokenType.End);
	}
	
	{
		auto lex = testlexer("1uL");
		lex.match(TokenType.Begin);
		lex.match(TokenType.IntegerLiteral);
		assert(lex.front.type == TokenType.End);
	}
	
	{
		auto lex = testlexer("1Ul");
		lex.match(TokenType.Begin);
		lex.match(TokenType.IntegerLiteral);
		assert(lex.front.type == TokenType.End);
	}
	
	{
		auto lex = testlexer("1UL");
		lex.match(TokenType.Begin);
		lex.match(TokenType.IntegerLiteral);
		assert(lex.front.type == TokenType.End);
	}
	
	{
		auto lex = testlexer("1lu");
		lex.match(TokenType.Begin);
		lex.match(TokenType.IntegerLiteral);
		assert(lex.front.type == TokenType.End);
	}
	
	{
		auto lex = testlexer("1lU");
		lex.match(TokenType.Begin);
		lex.match(TokenType.IntegerLiteral);
		assert(lex.front.type == TokenType.End);
	}
	
	{
		auto lex = testlexer("1Lu");
		lex.match(TokenType.Begin);
		lex.match(TokenType.IntegerLiteral);
		assert(lex.front.type == TokenType.End);
	}
	
	{
		auto lex = testlexer("1LU");
		lex.match(TokenType.Begin);
		lex.match(TokenType.IntegerLiteral);
		assert(lex.front.type == TokenType.End);
	}
	
	{
		auto lex = testlexer("1f");
		lex.match(TokenType.Begin);
		lex.match(TokenType.FloatLiteral);
		assert(lex.front.type == TokenType.End);
	}
	
	{
		auto lex = testlexer("1F");
		lex.match(TokenType.Begin);
		lex.match(TokenType.FloatLiteral);
		assert(lex.front.type == TokenType.End);
	}
	
	{
		auto lex = testlexer("1.f");
		lex.match(TokenType.Begin);
		lex.match(TokenType.IntegerLiteral);
		lex.match(TokenType.Dot);
		lex.match(TokenType.Identifier);
		assert(lex.front.type == TokenType.End);
	}
	
	{
		auto lex = testlexer("1.1f");
		lex.match(TokenType.Begin);
		lex.match(TokenType.FloatLiteral);
		assert(lex.front.type == TokenType.End);
	}
	
	{
		auto lex = testlexer("1.1F");
		lex.match(TokenType.Begin);
		lex.match(TokenType.FloatLiteral);
		assert(lex.front.type == TokenType.End);
	}
	
	{
		auto lex = testlexer("1.1L");
		lex.match(TokenType.Begin);
		lex.match(TokenType.FloatLiteral);
		assert(lex.front.type == TokenType.End);
	}
	
	{
		// /!\ 'l' is *NOT* a valid suffix; this one is case sensitive.
		auto lex = testlexer("1.1l");
		lex.match(TokenType.Begin);
		lex.match(TokenType.Invalid);
		assert(lex.front.type == TokenType.End);
	}
	
	{
		auto lex = testlexer("1. f");
		lex.match(TokenType.Begin);
		lex.match(TokenType.IntegerLiteral);
		lex.match(TokenType.Dot);
		lex.match(TokenType.Identifier);
		assert(lex.front.type == TokenType.End);
	}
	
	{
		auto lex = testlexer("1.1 f");
		lex.match(TokenType.Begin);
		lex.match(TokenType.FloatLiteral);
		lex.match(TokenType.Identifier);
		assert(lex.front.type == TokenType.End);
	}
	
	{
		auto lex = testlexer(`q"(("))"`);
		lex.match(TokenType.Begin);
		
		auto t = lex.front;
		
		assert(t.type == TokenType.StringLiteral);
		assert(t.name.toString(context) == `(")`);
		lex.popFront();
		
		assert(lex.front.type == TokenType.End);
	}
	
	{
		auto lex = testlexer(`q"[]"`);
		lex.match(TokenType.Begin);
		
		auto t = lex.front;
		
		assert(t.type == TokenType.StringLiteral);
		assert(t.name.toString(context) == "");
		lex.popFront();
		
		assert(lex.front.type == TokenType.End);
	}
	
	{
		auto lex = testlexer(`q"{<}"`);
		lex.match(TokenType.Begin);
		
		auto t = lex.front;
		
		assert(t.type == TokenType.StringLiteral);
		assert(t.name.toString(context) == "<");
		lex.popFront();
		
		assert(lex.front.type == TokenType.End);
	}
	
	{
		auto lex = testlexer(`q"<">"`);
		lex.match(TokenType.Begin);
		
		auto t = lex.front;
		
		assert(t.type == TokenType.StringLiteral);
		assert(t.name.toString(context) == `"`);
		lex.popFront();
		
		assert(lex.front.type == TokenType.End);
	}
	
	{
		auto lex = testlexer("q{{foo}}");
		lex.match(TokenType.Begin);
		
		auto t = lex.front;
		
		assert(t.type == TokenType.StringLiteral);
		assert(t.name.toString(context) == "{foo}");
		lex.popFront();
		
		assert(lex.front.type == TokenType.End);
	}
	
	{
		auto lex = testlexer(`q"EOF
EOF"`);
		lex.match(TokenType.Begin);
		
		auto t = lex.front;
		
		assert(t.type == TokenType.StringLiteral);
		assert(t.name.toString(context) == "");
		lex.popFront();
		
		assert(lex.front.type == TokenType.End);
	}
	
	{
		auto lex = testlexer(`q"EOF

EOF"`);
		lex.match(TokenType.Begin);
		
		auto t = lex.front;
		
		assert(t.type == TokenType.StringLiteral);
		assert(t.name.toString(context) == "\n");
		lex.popFront();
		
		assert(lex.front.type == TokenType.End);
	}
	
	{
		auto lex = testlexer(`q"MONKEYS
🙈🙉🙊
MONKEYS"`);
		lex.match(TokenType.Begin);
		
		auto t = lex.front;
		
		assert(t.type == TokenType.StringLiteral);
		assert(t.name.toString(context) == "🙈🙉🙊\n");
		lex.popFront();
		
		assert(lex.front.type == TokenType.End);
	}
	
	{
		auto lex = testlexer(`q"I_LOVE_PYTHON
"""python comment!"""
I_LOVE_PYTHON"`);
		lex.match(TokenType.Begin);
		
		auto t = lex.front;
		
		assert(t.type == TokenType.StringLiteral);
		assert(t.name.toString(context) == `"""python comment!"""` ~ '\n');
		lex.popFront();
		
		assert(lex.front.type == TokenType.End);
	}
	
	{
		auto lex = testlexer(`r"\r"`);
		lex.match(TokenType.Begin);
		
		auto t = lex.front;
		
		assert(t.type == TokenType.StringLiteral);
		assert(t.name.toString(context) == "\\r");
		lex.popFront();
		
		assert(lex.front.type == TokenType.End);
	}
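	
	{
		// A minimal shebang check, assuming popSheBang consumes the
		// whole #! line before the Begin token (see popSheBang above),
		// so only the payload after it is tokenized.
		auto lex = testlexer("#!/usr/bin/env sdc\n42");
		lex.match(TokenType.Begin);
		lex.match(TokenType.IntegerLiteral);
		assert(lex.front.type == TokenType.End);
	}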
}