module d.gc.run;

struct RunDesc {
	import d.gc.rbtree;
	Node!RunDesc node;

	// TODO: use an anonymous union so the misc indirection goes away.
	union U {
		DirtyRunMisc dirty;
		SmallRunMisc small;
	}

	U misc;

	@property
	ref DirtyRunMisc dirty() {
		// FIXME: express these checks as an in contract.
		auto pd = chunk.pages[runID];
		assert(pd.free, "Expected free run");
		assert(pd.dirty, "Expected dirty run");

		return misc.dirty;
	}

	@property
	ref SmallRunMisc small() {
		// FIXME: express these checks as an in contract.
		auto pd = chunk.pages[runID];
		assert(pd.allocated, "Expected allocated run");
		assert(pd.offset == 0, "Invalid run");
		assert(pd.small, "Expected small run");

		return misc.small;
	}

	@property
	auto chunk() {
		// Chunks are aligned on AlignMask + 1 boundaries, so masking
		// the address of this descriptor recovers its chunk.
		import d.gc.chunk, d.gc.spec;
		return cast(Chunk*) ((cast(size_t) &this) & ~AlignMask);
	}

	@property
	uint runID() {
		// Use size_t: casting a 64-bit pointer to uint would truncate it.
		auto offset = (cast(size_t) &this) - (cast(size_t) &chunk.runs[0]);
		auto r = cast(uint) (offset / RunDesc.sizeof);

		// FIXME: express this check as an out contract.
		import d.gc.chunk;
		assert(r < DataPages);
		return r;
	}
}
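
// A minimal sketch of the alignment trick behind the chunk property above:
// chunks live on AlignMask + 1 boundaries, so masking any interior address
// recovers the chunk base. The mask below is a hypothetical stand-in for
// the real value in d.gc.spec.
unittest {
	enum size_t FakeAlignMask = (1 << 20) - 1;

	size_t chunkBase = 0x4010_0000; // 2^20 aligned.
	size_t interior = chunkBase + 0x1234;
	assert((interior & ~FakeAlignMask) == chunkBase);
}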

ptrdiff_t addrRunCmp(RunDesc* lhs, RunDesc* rhs) {
	auto l = cast(size_t) lhs;
	auto r = cast(size_t) rhs;

	// Compare this way rather than subtracting to avoid integer overflow.
	return (l > r) - (l < r);
}
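
// Sketch of addrRunCmp in action: the (l > r) - (l < r) idiom yields a
// negative, zero or positive result without the overflow a raw subtraction
// could produce. The pointer values are fabricated for illustration only.
unittest {
	auto a = cast(RunDesc*) 0x1000;
	auto b = cast(RunDesc*) 0x2000;

	assert(addrRunCmp(a, b) < 0);
	assert(addrRunCmp(b, a) > 0);
	assert(addrRunCmp(a, a) == 0);
}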

ptrdiff_t sizeAddrRunCmp(RunDesc* lhs, RunDesc* rhs) {
	import d.gc.sizeclass;
	int rBinID = rhs.chunk.pages[rhs.runID].binID;

	auto rsize = rhs.chunk.pages[rhs.runID].size;
	assert(rBinID == getBinID(rsize + 1) - 1);

	auto l = cast(size_t) lhs;
	int lBinID;

	import d.gc.spec;
	if (l & ~PageMask) {
		// lhs is a real run descriptor.
		lBinID = lhs.chunk.pages[lhs.runID].binID;

		auto lsize = lhs.chunk.pages[lhs.runID].size;
		assert(lBinID == getBinID(lsize + 1) - 1);
	} else {
		// lhs is a sentinel whose low bits encode a bin ID,
		// so the tree can be searched by size class alone.
		lhs = null;
		lBinID = cast(int) (l & PageMask);
	}

	// Order by bin ID first, then by address within a bin.
	return (lBinID == rBinID) ? addrRunCmp(lhs, rhs) : lBinID - rBinID;
}

private:

// Intrusive doubly linked list of dirty runs.
struct DirtyRunMisc {
	RunDesc* next;
	RunDesc* prev;
}

struct SmallRunMisc {
	ubyte binID;
	ushort freeSlots;

	// Index of this run's mark bits in the chunk's GC bitmap;
	// 0 when no collection is in progress.
	ushort bitmapIndex;

	// Two-level bitmap of free slots: a header bit is set when the
	// corresponding word of bitmap still has free slots, and a set
	// bitmap bit marks a free slot (16 * 32 = 512 slots at most).
	ushort header;
	uint[16] bitmap;

	uint allocate() {
		// TODO: in contracts.
		assert(freeSlots > 0);

		import sdc.intrinsics;
		uint hindex = countTrailingZeros(header);
		assert(hindex < 16, "Cannot allocate from that run");

		auto bindex = countTrailingZeros(bitmap[hindex]);
		assert(bindex < 32, "Invalid bitmap");

		// Use xor rather than and-not: the bit is known to be set,
		// so xor clears it without inverting a mask.
		bitmap[hindex] ^= (1 << bindex);

		// If that was the last free slot in this word,
		// clear the matching header bit.
		if (bitmap[hindex] == 0) {
			header ^= cast(ushort) (1 << hindex);
		}

		// If a collection is in progress, mark the new allocation as live.
		if (bitmapIndex != 0) {
			import d.gc.chunk, d.gc.spec;
			auto chunk = cast(Chunk*) ((cast(size_t) &this) & ~AlignMask);

			assert(chunk.header.bitmap !is null);
			auto bPtr = chunk.header.bitmap + bitmapIndex + hindex;

			// This is live, don't collect it!
			*bPtr = *bPtr | (1 << bindex);
		}

		freeSlots--;
		return hindex * 32 + bindex;
	}
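
	// A minimal sketch of allocate() on a free-standing run, assuming
	// bitmapIndex == 0 so the GC marking branch above is skipped. Slots
	// 1 and 2 of word 0 start out free; allocating both exhausts the
	// word and clears its header bit.
	unittest {
		SmallRunMisc m;
		m.freeSlots = 2;
		m.header = 0b1;
		m.bitmap[0] = 0b110;

		assert(m.allocate() == 1);
		assert(m.allocate() == 2);
		assert(m.freeSlots == 0);
		assert(m.header == 0);
	}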

	bool isFree(uint bit) const {
		// TODO: in contract.
		assert(bit < 512);

		auto hindex = bit / 32;
		auto bindex = bit % 32;

		// TODO: in contract.
		assert(hindex < 16);

		return !!(bitmap[hindex] & (1 << bindex));
	}

	void free(uint bit) {
		// TODO: in contract.
		assert(bit < 512);
		assert(!isFree(bit), "Already freed");

		auto hindex = bit / 32;
		auto bindex = bit % 32;

		// TODO: in contract.
		assert(hindex < 16);

		freeSlots++;
		header |= cast(ushort) (1 << hindex);
		bitmap[hindex] |= (1 << bindex);
	}
}
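
// Round-trip sketch for isFree and free, again assuming a free-standing
// run with no collection in progress (bitmapIndex == 0).
unittest {
	SmallRunMisc m;
	m.freeSlots = 1;
	m.header = 0b1;
	m.bitmap[0] = 0b1;

	auto slot = m.allocate();
	assert(slot == 0);
	assert(!m.isFree(0));

	m.free(0);
	assert(m.isFree(0));
	assert(m.freeSlots == 1);
	assert(m.header == 0b1);
}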