module d.gc.arena;

extern(C) void* __sd_gc_tl_malloc(size_t size) {
	return tl.alloc(size);
}

extern(C) void* __sd_gc_tl_array_alloc(size_t size) {
	return __sd_gc_tl_malloc(size);
}

extern(C) void _tl_gc_free(void* ptr) {
	tl.free(ptr);
}

extern(C) void* _tl_gc_realloc(void* ptr, size_t size) {
	return tl.realloc(ptr, size);
}

extern(C) void _tl_gc_set_stack_bottom(const void* bottom) {
	// tl.stackBottom = makeRange(bottom[]).ptr;
	tl.stackBottom = makeRange(bottom[0 .. 0]).ptr;
}

extern(C) void _tl_gc_add_roots(const void[] range) {
	tl.addRoots(range);
}

extern(C) void _tl_gc_collect() {
	tl.collect();
}
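
/**
 * A minimal usage sketch for these thread-local entry points, assuming the
 * compiler and runtime wire them up roughly as follows (only the functions
 * above are real; the variables are illustrative):
 *
 *     _tl_gc_set_stack_bottom(threadStackBottom); // once per thread
 *     _tl_gc_add_roots(moduleGlobals);            // register static data
 *     auto p = __sd_gc_tl_malloc(64);
 *     p = _tl_gc_realloc(p, 128);
 *     _tl_gc_collect();                           // trace from roots + stack
 *     _tl_gc_free(p);
 */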

Arena tl;

struct Arena {
	// Spare chunk to avoid churning too much.
	import d.gc.chunk;
	Chunk* spare;

	// Free runs we can allocate from.
	import d.gc.rbtree, d.gc.run;
	RBTree!(RunDesc, sizeAddrRunCmp) freeRunTree;

	// Extents describing huge allocs.
	import d.gc.extent;
	ExtentTree hugeTree;
	ExtentTree hugeLookupTree;

	// Set of chunks for GC lookup.
	ChunkSet chunkSet;

	const(void*)* stackBottom;
	const(void*)[][] roots;

	import d.gc.bin, d.gc.sizeclass;
	Bin[ClassCount.Small] bins;

	void* alloc(size_t size) {
		if (size <= SizeClass.Small) {
			return allocSmall(size);
		}

		if (size <= SizeClass.Large) {
			return allocLarge(size, false);
		}

		return allocHuge(size);
	}

	void* calloc(size_t size) {
		if (size <= SizeClass.Small) {
			auto ret = allocSmall(size);
			if (ret !is null) {
				memset(ret, 0, size);
			}

			return ret;
		}

		if (size <= SizeClass.Large) {
			return allocLarge(size, true);
		}

		return allocHuge(size);
	}

	void free(void* ptr) {
		auto c = findChunk(ptr);
		if (c !is null) {
			// This is not a huge alloc, assert we own the arena.
			assert(c.header.arena is &this);

			freeInChunk(ptr, c);
		} else {
			freeHuge(ptr);
		}
	}

	void* realloc(void* ptr, size_t size) {
		if (size == 0) {
			free(ptr);
			return null;
		}

		if (ptr is null) {
			return alloc(size);
		}

		auto newBinID = getBinID(size);

		// TODO: Try in place resize for large/huge.
		auto oldBinID = newBinID;

		auto c = findChunk(ptr);
		if (c !is null) {
			// This is not a huge alloc, assert we own the arena.
			assert(c.header.arena is &this);
			oldBinID = c.pages[c.getRunID(ptr)].binID;
		} else {
			auto e = extractHugeExtent(ptr);
			assert(e !is null);

			// We need to keep it alive for now.
			hugeTree.insert(e);
			oldBinID = getBinID(e.size);
		}

		if (newBinID == oldBinID) {
			return ptr;
		}

		auto newPtr = alloc(size);
		if (newPtr is null) {
			return null;
		}

		auto cpySize =
			(newBinID > oldBinID) ? getSizeFromBinID(oldBinID) : size;

		memcpy(newPtr, ptr, cpySize);

		free(ptr);
		return newPtr;
	}
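
	/*
	 * Note on the fast path above: both the old and the new size are
	 * reduced to their size class (bin ID), and the pointer is returned
	 * unchanged when the classes match. For instance, assuming a class
	 * table where requests of 33 and 40 bytes both round up to a 48-byte
	 * class (illustrative values, not necessarily SDC's actual classes),
	 * realloc from 33 to 40 bytes is a no-op. Only a class change triggers
	 * alloc + memcpy + free; when growing, the full old class size is
	 * copied, which is safe since the old allocation is at least that large.
	 */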

private:
	/**
	 * Small allocation facilities.
	 */
	void* allocSmall(size_t size) {
		// TODO: in contracts
		assert(size <= SizeClass.Small);

		auto binID = getBinID(size);
		assert(binID < ClassCount.Small);

		// Load the item size eagerly; this also acts as a prefetch of the bin info.
		size = binInfos[binID].itemSize;

		auto run = findSmallRun(binID);
		if (run is null) {
			return null;
		}

		auto index = run.small.allocate();
		auto base = cast(void*) &run.chunk.datas[run.runID];

		return base + size * index;
	}

	RunDesc* findSmallRun(ubyte binID) {
		// XXX: in contract.
		assert(binID < ClassCount.Small);

		auto run = bins[binID].current;
		if (run !is null && run.small.freeSlots != 0) {
			return run;
		}

		// This allows us to keep track of whether metadata get allocated in that bin.
		bins[binID].current = null;

		// XXX: use extract or something.
		run = bins[binID].runTree.bestfit(null);
		if (run is null) {
			// We don't have any run that fits, allocate a new one.
			return allocateSmallRun(binID);
		}

		bins[binID].runTree.remove(run);
		return bins[binID].current = run;
	}

	RunDesc* allocateSmallRun(ubyte binID) {
		// XXX: in contract.
		assert(binID < ClassCount.Small);
		assert(bins[binID].current is null);

		import d.gc.spec;
		uint needPages = binInfos[binID].needPages;
		auto runBinID = getBinID(needPages << LgPageSize);

		auto run = extractFreeRun(runBinID);
		if (run is null) {
			return null;
		}

		// We may have allocated the run we need when allocating metadata.
		if (bins[binID].current !is null) {
			assert(run !is bins[binID].current);

			// In which case we put the free run back in the tree.
			assert(run.chunk.pages[run.runID].free);
			freeRunTree.insert(run);

			// And use the metadata run.
			return bins[binID].current;
		}

		auto c = run.chunk;
		auto i = run.runID;

		auto rem = c.splitSmallRun(i, binID);
		if (rem) {
			assert(c.pages[rem].free);
			freeRunTree.insert(&c.runs[rem]);
		}

		return bins[binID].current = run;
	}

	/**
	 * Large allocation facilities.
	 */
	void* allocLarge(size_t size, bool zero) {
		// TODO: in contracts
		assert(size > SizeClass.Small && size <= SizeClass.Large);

		auto run = allocateLargeRun(getAllocSize(size), zero);
		if (run is null) {
			return null;
		}

		return cast(void*) &run.chunk.datas[run.runID];
	}

	RunDesc* allocateLargeRun(size_t size, bool zero) {
		// TODO: in contracts
		assert(size > SizeClass.Small && size <= SizeClass.Large);
		assert(size == getAllocSize(size));

		auto binID = getBinID(size);
		assert(binID >= ClassCount.Small && binID < ClassCount.Large);

		auto run = extractFreeRun(binID);
		if (run is null) {
			return null;
		}

		auto c = run.chunk;
		auto i = run.runID;

		auto rem = c.splitLargeRun(size, i, binID, zero);
		if (rem) {
			assert(c.pages[rem].free);
			freeRunTree.insert(&c.runs[rem]);
		}

		return run;
	}

	/**
	 * Extract a free run from an existing or newly allocated chunk.
	 * The run will not be present in the freeRunTree.
	 */
	RunDesc* extractFreeRun(ubyte binID) {
		// XXX: use extract or something.
		auto run = freeRunTree.bestfit(cast(RunDesc*) binID);
		if (run !is null) {
			freeRunTree.remove(run);
			return run;
		}

		auto c = allocateChunk();
		if (c is null) {
			// XXX: In the multithreaded version, we should
			// retry from the tree, as a run may have been freed
			// while we were allocating the chunk.
			return null;
		}

		// In rare cases, we may have allocated metadata
		// in the chunk for bookkeeping.
		if (c.pages[0].allocated) {
			return extractFreeRun(binID);
		}

		return &c.runs[0];
	}
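
	/*
	 * A note on the bestfit call above (my reading; treat it as an
	 * assumption): the tree is ordered by sizeAddrRunCmp, so passing the
	 * bin ID disguised as a RunDesc* builds a key that acts as a size-only
	 * lower bound, and bestfit returns the smallest free run able to serve
	 * that bin. The recursive call after allocateChunk is expected to
	 * succeed because any metadata carved out of the fresh chunk leaves its
	 * remaining free space in the freeRunTree.
	 */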

	/**
	 * Free in chunk.
	 */
	void freeInChunk(void* ptr, Chunk* c) {
		auto runID = c.getRunID(ptr);
		auto pd = c.pages[runID];
		assert(pd.allocated);

		if (pd.small) {
			freeSmall(ptr, c, pd.binID, runID);
		} else {
			freeLarge(ptr, c, pd.binID, runID);
		}
	}

	void freeSmall(void* ptr, Chunk* c, uint binID, uint runID) {
		// XXX: in contract.
		assert(binID < ClassCount.Small);

		auto offset = (cast(uint) ptr) - (cast(uint) &c.datas[runID]);

		auto binInfo = binInfos[binID];
		auto size = binInfo.itemSize;
		auto index = offset / size;

		// Sanity check: no interior pointer.
		auto base = cast(void*) &c.datas[runID];
		assert(ptr is (base + size * index));

		auto run = &c.runs[runID];
		run.small.free(index);

		auto freeSlots = run.small.freeSlots;
		if (freeSlots == binInfo.slots) {
			if (run is bins[binID].current) {
				bins[binID].current = null;
			} else if (binInfo.slots > 1) {
				// When we only have one slot in the run,
				// it is never added to the tree.
				bins[binID].runTree.remove(run);
			}

			freeRun(c, runID, binInfo.needPages);
		} else if (freeSlots == 1 && run !is bins[binID].current) {
			bins[binID].runTree.insert(run);
		}
	}
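
	/*
	 * Invariant for small runs, as I read the code above: a run sits in
	 * bins[binID].runTree only while it is partially used and is not the
	 * bin's current run. A run that just went from full to having one free
	 * slot becomes allocatable again and is inserted; a run whose slots are
	 * all free is taken out of circulation and handed back to freeRun() as
	 * whole pages.
	 */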

	void freeLarge(void* ptr, Chunk* c, uint binID, uint runID) {
		// TODO: in contracts
		assert(binID >= ClassCount.Small && binID < ClassCount.Large);

		// Sanity check: no interior pointer.
		auto base = cast(void*) &c.datas[runID];
		assert(ptr is base);

		import d.gc.spec;
		auto pages = cast(uint) (getSizeFromBinID(binID) >> LgPageSize);
		freeRun(c, runID, pages);
	}

	void freeRun(Chunk* c, uint runID, uint pages) {
		// XXX: in contract.
		auto pd = c.pages[runID];
		assert(pd.allocated && pd.offset == 0);
		assert(pages > 0);

		// XXX: find a way to merge dirty and clean free runs.
		if (runID > 0) {
			auto previous = c.pages[runID - 1];
			if (previous.free && previous.dirty) {
				runID -= previous.pages;
				pages += previous.pages;

				assert(c.pages[runID].free);
				freeRunTree.remove(&c.runs[runID]);
			}
		}

		if (runID + pages < DataPages) {
			auto nextID = runID + pages;
			auto next = c.pages[nextID];

			if (next.free && next.dirty) {
				pages += next.pages;

				assert(c.pages[nextID].free);
				freeRunTree.remove(&c.runs[nextID]);
			}
		}

		import d.gc.spec;
		auto runBinID = cast(ubyte) (getBinID((pages << LgPageSize) + 1) - 1);

		// Keep track of the (possibly coalesced) free run.
		import d.gc.bin;
		auto d = PageDescriptor(false, false, false, true, runBinID, pages);

		c.pages[runID] = d;
		c.pages[runID + pages - 1] = d;

		assert(c.pages[runID].free);
		freeRunTree.insert(&c.runs[runID]);

		// XXX: remove dirty
	}
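
	/*
	 * Worked example for the runBinID computation above: for a run of
	 * `pages` pages we want the largest size class that still fits in it,
	 * so we take the class of (pages << LgPageSize) + 1 and step back by
	 * one. E.g. with 4KB pages (an assumption; the real value lives in
	 * d.gc.spec), a 3-page run spans 12288 bytes: getBinID(12289) is the
	 * first class strictly larger than that, and the class right below it
	 * is the largest request the run can satisfy, which is what the
	 * freeRunTree is keyed on.
	 */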

	/**
	 * Huge alloc/free facilities.
	 */
	void* allocHuge(size_t size) {
		// TODO: in contracts
		assert(size > SizeClass.Large);

		size = getAllocSize(size);
		if (size == 0) {
			// You can't reserve the whole address space.
			return null;
		}

		// XXX: Consider having a run for extents.
		// It should provide good locality for huge
		// alloc lookup (for GC scan and huge free).
		import d.gc.extent;
		auto e = cast(Extent*) allocSmall(Extent.sizeof);
		if (e is null) {
			return null;
		}

		e.arena = &this;

		import d.gc.mman, d.gc.spec;
		auto ret = map_chunks(((size - 1) >> LgChunkSize) + 1);
		if (ret is null) {
			free(e);
			return null;
		}

		e.addr = ret;
		e.size = size;

		hugeTree.insert(e);

		return ret;
	}
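
	/*
	 * Huge allocations are served by mapping whole chunks directly:
	 * ((size - 1) >> LgChunkSize) + 1 rounds the request up to a whole
	 * number of chunks. For instance, with 2MB chunks (an assumption; the
	 * actual value lives in d.gc.spec), a 3MB request maps two chunks. The
	 * Extent describing the mapping is itself a small allocation from this
	 * arena and is tracked in hugeTree for free() and for GC lookup.
	 */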

	Extent* extractHugeExtent(void* ptr) {
		// XXX: in contracts
		import d.gc.spec;
		assert(((cast(size_t) ptr) & AlignMask) == 0);

		Extent test;
		test.addr = ptr;
		auto e = hugeTree.extract(&test);
		if (e is null) {
			e = hugeLookupTree.extract(&test);
		}

		// FIXME: out contract.
		if (e !is null) {
			assert(e.addr is ptr);
			assert(e.arena is &this);
		}

		return e;
	}

	void freeHuge(void* ptr) {
		if (ptr is null) {
			// free(null) is valid, we want to handle it properly.
			return;
		}

		freeExtent(extractHugeExtent(ptr));
	}

	void freeExtent(Extent* e) {
		// FIXME: in contract
		assert(e !is null);
		assert(hugeTree.find(e) is null);
		assert(hugeLookupTree.find(e) is null);

		import d.gc.mman;
		pages_unmap(e.addr, e.size);

		free(e);
	}

	/**
	 * Chunk allocation facilities.
	 */
	Chunk* allocateChunk() {
		// If we have a spare chunk, use that.
		if (spare !is null) {
			/*
			// FIXME: Only scope(exit) is supported.
			scope(success) spare = null;
			return spare;
			/*/
			auto c = spare;
			spare = null;
			return c;
			//*/
		}

		auto c = Chunk.allocate(&this);
		if (c is null) {
			return null;
		}

		assert(c.header.arena is &this);
		// assert(c.header.addr is c);

		// Add the chunk as spare so metadata can be allocated
		// from it. For instance, this is useful if the chunk set
		// needs to be resized to register this chunk.
		assert(spare is null);
		spare = c;

		// If we failed to register the chunk, free and bail out.
		if (chunkSet.register(c)) {
			c.free();
			c = null;
		}

		spare = null;
		return c;
	}

	/**
	 * GC facilities
	 */
	void addRoots(const void[] range) {
		// FIXME: Casting to void* is apparently not handled properly :(
		auto ptr = cast(void*) roots.ptr;

		// We realloc every time. It doesn't really matter at this point.
		roots.ptr = cast(const(void*)[]*)
			realloc(ptr, (roots.length + 1) * void*[].sizeof);

		// Using .ptr to bypass bound checking.
		roots.ptr[roots.length] = makeRange(range);

		// Update the range.
		roots = roots.ptr[0 .. roots.length + 1];
	}

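	/*
	 * Collection, sketched from the code below: all huge extents are moved
	 * to hugeLookupTree, chunks are prepared, the chunk bitmaps themselves
	 * are marked live, then roots, registers and the stack are scanned.
	 * Chunks are rescanned until no new pointers are found, after which
	 * each chunk collects its dead runs and any extent still left in
	 * hugeLookupTree is unmapped.
	 */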
	void collect() {
		// Get ready to detect huge allocations.
		hugeLookupTree = hugeTree;

		// FIXME: This bypasses visibility.
		hugeTree.root = null;

		// TODO: The set needs a range interface or some other way to iterate.
		auto chunks = chunkSet.cloneChunks();
		foreach (c; chunks) {
			if (c is null) {
				continue;
			}

			c.prepare();
		}

		// Mark the bitmaps as live.
		foreach (c; chunks) {
			if (c is null) {
				continue;
			}

			auto bmp = cast(void**) &c.header.bitmap;
			scan(bmp[0 .. 1]);
		}

		// Scan the roots!
		__sdgc_push_registers(scanStack);
		foreach (range; roots) {
			scan(range);
		}

		// Go on and on until all worklists are empty.
		auto needRescan = true;
		while (needRescan) {
			needRescan = false;
			foreach (c; chunks) {
				if (c is null) {
					continue;
				}

				needRescan = c.scan() || needRescan;
			}
		}

		// Now we can collect.
		foreach (c; chunks) {
			if (c is null) {
				continue;
			}

			c.collect();
		}

		// Extents that have not been moved back to hugeTree are dead.
		while (!hugeLookupTree.empty) {
			freeExtent(hugeLookupTree.extractAny());
		}
	}

	bool scanStack() {
		const(void*) p;

		auto iptr = cast(size_t) &p;
		auto iend = cast(size_t) stackBottom;
		auto length = (iend - iptr) / size_t.sizeof;

		auto range = (&p)[1 .. length];
		return scan(range);
	}
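
	/*
	 * Note on the range above: it runs from the address of the local `p`
	 * up to stackBottom, which assumes a downward-growing stack. The slice
	 * starts at index 1, so the slot holding `p` itself is skipped.
	 * __sdgc_push_registers is expected to spill callee-saved registers
	 * onto the stack before invoking this delegate, so pointers living
	 * only in registers are covered by the same scan.
	 */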

	bool scan(const(void*)[] range) {
		bool newPtr;
		foreach (ptr; range) {
			auto iptr = cast(size_t) ptr;

			import d.gc.spec;
			auto c = findChunk(ptr);
			if (c !is null && chunkSet.test(c)) {
				newPtr = c.mark(ptr) || newPtr;
				continue;
			}

			Extent ecmp;
			ecmp.addr = cast(void*) ptr;
			auto e = hugeLookupTree.extract(&ecmp);
			if (e is null) {
				continue;
			}

			hugeTree.insert(e);

			auto hugeRange = makeRange(e.addr[0 .. e.size]);

			// FIXME: Ideally, a worklist would be preferable, as:
			// 1/ we could recurse a lot this way, and
			// 2/ we want to keep working on the same chunk for locality.
			scan(hugeRange);
		}

		return newPtr;
	}
}

private:

/**
 * This function gets a void[] range and changes it into a
 * const(void*)[] one, reducing it to alignment boundaries.
 */
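// Worked example (illustrative addresses, 64-bit target): for a range
// starting at 0x1003 with length 32, the pointer is aligned up to 0x1008
// and the result is aptr[0 .. 3], i.e. 3 word-sized slots, all of which
// lie entirely inside the original range.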
const(void*)[] makeRange(const void[] range) {
	size_t iptr = cast(size_t) range.ptr;
	auto aiptr = (((iptr - 1) / size_t.sizeof) + 1) * size_t.sizeof;

	// Align the ptr and remove the difference from the length.
	auto aptr = cast(const(void*)*) aiptr;
	if (range.length < 8) {
		return aptr[0 .. 0];
	}

	auto length = (range.length - aiptr + iptr) / size_t.sizeof;
	return aptr[0 .. length];
}

struct ChunkSet {
	// Set of chunks for GC lookup.
	Chunk** chunks;
	uint chunkCount;

	// Metadata for the chunk set.
	ubyte lgChunkSetSize;
	ubyte maxProbe;

	@property
	Arena* arena() {
		// Recover the enclosing Arena from the address of this field
		// by subtracting the field's offset (a hand-rolled offsetof).
		auto chunkSetOffset = cast(size_t) (&(cast(Arena*) null).chunkSet);
		return cast(Arena*) ((cast(size_t) &this) - chunkSetOffset);
	}

	import d.gc.chunk;
	bool register(Chunk* c) {
		// FIXME: in contract
		assert(!test(c));

		// We resize if the set is 7/8 full.
		auto limitSize = (7UL << lgChunkSetSize) / 8;
		if (limitSize <= chunkCount) {
			if (increaseSize()) {
				return true;
			}
		}

		chunkCount++;
		insert(c);

		// FIXME: out contract
		assert(test(c));
		return false;
	}

	/**
	 * GC facilities
	 */
	bool test(Chunk* c) {
		// FIXME: in contract
		assert(c !is null);

		import d.gc.spec;
		auto k = (cast(size_t) c) >> LgChunkSize;
		auto mask = (1 << lgChunkSetSize) - 1;

		foreach (i; 0 .. maxProbe) {
			if (c is chunks[(k + i) & mask]) {
				return true;
			}
		}

		return false;
	}

	Chunk*[] cloneChunks() {
		auto oldLgChunkSetSize = lgChunkSetSize;
		auto allocSize = size_t.sizeof << lgChunkSetSize;
		auto buf = cast(Chunk**) arena.alloc(allocSize);

		// We may have created a new chunk to allocate the buffer.
		while (lgChunkSetSize != oldLgChunkSetSize) {
			// I don't think this can run more than once,
			// but better safe than sorry.
			oldLgChunkSetSize = lgChunkSetSize;
			allocSize = size_t.sizeof << lgChunkSetSize;
			buf = cast(Chunk**) arena.realloc(buf, allocSize);
		}

		memcpy(buf, chunks, allocSize);
		return buf[0 .. 1UL << lgChunkSetSize];
	}

private:
	/**
	 * Internal facilities
	 */
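	/*
	 * The set is an open-addressing hash table keyed on the chunk address
	 * shifted by LgChunkSize, using Robin Hood hashing: on a collision, the
	 * entry that is further from its home slot keeps the slot and the other
	 * one keeps probing. maxProbe records one past the longest displacement
	 * seen, so test() only has to probe that many slots. Tiny illustrative
	 * example (hypothetical 4-slot table): if chunks A and B both hash to
	 * slot 2, A ends up in slot 2 with displacement 0, B in slot 3 with
	 * displacement 1, and maxProbe becomes 2.
	 */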
	void insert(Chunk* c) {
		auto mask = (1 << lgChunkSetSize) - 1;

		import d.gc.spec;
		auto k = (cast(size_t) c) >> LgChunkSize;
		auto p = (cast(uint) k) & mask;

		auto i = p;
		ubyte d = 0;
		while (chunks[i] !is null) {
			auto ce = chunks[i];
			auto ke = (cast(size_t) ce) >> LgChunkSize;
			auto pe = (cast(uint) ke) & mask;

			// Robin Hood hashing.
			if (d > (i - pe)) {
				chunks[i] = c;
				c = ce;
				k = ke;
				p = pe;

				if (d >= maxProbe) {
					maxProbe = cast(ubyte) (d + 1);
				}
			}

			i = ((i + 1) & mask);
			d = cast(ubyte) (i - p);
		}

		chunks[i] = c;
		if (d >= maxProbe) {
			maxProbe = cast(ubyte) (d + 1);
		}
	}

	bool increaseSize() {
		auto oldChunks = chunks;
		auto oldChunkSetSize = 1UL << lgChunkSetSize;

		lgChunkSetSize++;
		assert(lgChunkSetSize <= 32);

		import d.gc.spec;
		// auto newChunks = cast(Chunk**) arena.calloc(Chunk*.sizeof << lgChunkSetSize);
		auto newChunks =
			cast(Chunk**) arena.calloc(size_t.sizeof << lgChunkSetSize);
		assert(oldChunks is chunks);

		if (newChunks is null) {
			return true;
		}

		maxProbe = 0;
		chunks = newChunks;
		auto rem = chunkCount;

		for (uint i = 0; rem != 0; i++) {
			assert(i < oldChunkSetSize);

			auto c = oldChunks[i];
			if (c is null) {
				continue;
			}

			insert(c);
			rem--;
		}

		arena.free(oldChunks);
		return false;
	}
}

extern(C):
version (OSX) {
	// For some reason, OSX symbols get a _ prepended.
	bool _sdgc_push_registers(bool delegate());
	alias __sdgc_push_registers = _sdgc_push_registers;
} else {
	bool __sdgc_push_registers(bool delegate());
}
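
// The code above relies on the C runtime's memcpy and memset. If they are
// not already made visible by the runtime, the standard C prototypes are:
void* memcpy(void* dst, const void* src, size_t size);
void* memset(void* ptr, int value, size_t size);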