1 module d.gc.mman;
2 
/**
 * Map count chunks of ChunkSize bytes, aligned on a chunk boundary.
 *
 * Fast path: map exactly the requested size and hope the kernel hands
 * back an aligned address. If not, release it and fall back to the
 * over-allocate-and-trim slow path.
 *
 * Returns null on failure, including when count * ChunkSize would
 * overflow size_t.
 */
void* map_chunks(size_t count) {
	import d.gc.spec;

	// Guard against overflow in count * ChunkSize. Without this, an
	// overflowed (small) size could be mapped and, if it happened to be
	// aligned, returned as if the full request had been satisfied.
	// map_chunks_slow has its own overflow check; the fast path needs
	// one too.
	if (count > size_t.max / ChunkSize) {
		return null;
	}

	auto size = count * ChunkSize;
	auto ret = pages_map(null, size);

	// A null result has offset 0, so failure falls through to return null.
	auto offset = (cast(size_t) ret) & AlignMask;
	if (offset != 0) {
		// Mapped, but not chunk aligned; give it back and retry slowly.
		pages_unmap(ret, size);
		ret = map_chunks_slow(count);
	}

	return ret;
}
16 
/**
 * Return a previously mapped range of pages to the OS.
 *
 * Aborts the program if the kernel rejects the unmap, since that
 * indicates allocator bookkeeping has gone wrong.
 */
void pages_unmap(void* addr, size_t size) {
	auto result = munmap(addr, size);
	if (result == -1) {
		// TODO: display error.
		assert(0, "munmap failed");
	}
}
23 
24 private:
25 
/**
 * mmap a read/write anonymous private region of size bytes.
 *
 * When addr is non-null it is treated as a strict hint: if the kernel
 * places the mapping anywhere else, the mapping is released and null
 * is returned. Returns null on mmap failure as well.
 */
void* pages_map(void* addr, size_t size) {
	auto ret = mmap(addr, size, Prot.Read | Prot.Write,
	                Map.Private | Map.Anon, -1, 0);
	// mmap reports failure as MAP_FAILED (-1), never null.
	assert(ret !is null);

	immutable failed = cast(void*) -1L;
	if (ret is failed) {
		return null;
	}

	if (addr !is null && ret !is addr) {
		// The kernel ignored our hint; don't keep a misplaced mapping.
		pages_unmap(ret, size);
		return null;
	}

	// Out contract: either no hint was given, or we landed exactly on it.
	assert(addr is null || ret is addr);
	return ret;
}
45 
/**
 * Slow path for chunk mapping: over-allocate by almost one extra chunk
 * so a chunk-aligned region of the requested size is guaranteed to fit,
 * then trim the unaligned head and the leftover tail.
 *
 * Returns null if the allocation size overflows or mapping fails.
 */
void* map_chunks_slow(size_t count) {
	import d.gc.spec;
	auto size = count * ChunkSize;
	auto alloc_size = size + ChunkSize - PageSize;

	// Integer overflow.
	if (alloc_size < ChunkSize) {
		return null;
	}

	while (true) {
		auto pages = pages_map(null, alloc_size);
		if (pages is null) {
			return null;
		}

		// Distance from the mapping's start to the next chunk boundary.
		auto base = cast(size_t) pages;
		auto lead_size = ((base + ChunkSize - 1) & ~AlignMask) - base;

		// Trimming can fail (races with other mappings); retry until it
		// succeeds or mapping itself fails.
		auto aligned = pages_trim(pages, alloc_size, lead_size, size);
		if (aligned !is null) {
			return aligned;
		}
	}
}
72 
/**
 * Carve the aligned region [addr + lead_size, addr + lead_size + size)
 * out of a larger mapping of alloc_size bytes, returning the unused
 * head and tail to the OS.
 */
void* pages_trim(void* addr, size_t alloc_size, size_t lead_size, size_t size) {
	assert(alloc_size >= lead_size + size);

	auto base = cast(size_t) addr;
	auto ret = cast(void*) (base + lead_size);
	auto trail_size = alloc_size - lead_size - size;

	// Release the unaligned head, if any.
	if (lead_size > 0) {
		pages_unmap(addr, lead_size);
	}

	// Release whatever is left past the region we keep.
	if (trail_size > 0) {
		auto trail_addr = cast(void*) (base + lead_size + size);
		pages_unmap(trail_addr, trail_size);
	}

	return ret;
}
90 
91 private:
92 
// XXX: this is a bad port of mman header.
// We should be able to use actual prot of C header soon.

// File offset type passed to mmap; always 0 here, so a plain long suffices.
alias off_t = long; // Good for now.

// Memory protection flags, mirroring PROT_* from <sys/mman.h>.
enum Prot {
	None = 0x0,
	Read = 0x1,
	Write = 0x2,
	Exec = 0x4,
}

// Mapping flags, mirroring MAP_* from <sys/mman.h>. The values are
// platform specific: Anon (MAP_ANON) is 0x1000 on the BSD family
// (OSX, FreeBSD) but 0x20 on Linux.
version (OSX) {
	enum Map {
		Shared = 0x01,
		Private = 0x02,
		Fixed = 0x10,
		Anon = 0x1000,
	}
}

version (FreeBSD) {
	enum Map {
		Shared = 0x01,
		Private = 0x02,
		Fixed = 0x10,
		Anon = 0x1000,
	}
}

version (linux) {
	enum Map {
		Shared = 0x01,
		Private = 0x02,
		Fixed = 0x10,
		Anon = 0x20,
	}
}

// Raw C library entry points from <sys/mman.h>.
extern(C):
void* mmap(void* addr, size_t length, int prot, int flags, int fd,
           off_t offset);
int munmap(void* addr, size_t length);