1 module memutils.scoped;
2 
3 import memutils.constants;
4 import memutils.allocators;
5 import memutils.pool;
6 import memutils.utils;
7 import memutils.vector;
8 import memutils.refcounted;
9 import memutils.unique;
10 import memutils.hashmap;
11 import memutils.freelist;
12 import memutils.memory;
13 import memutils.helpers;
14 import std.traits : isArray, hasElaborateDestructor;
15 
16 alias ScopedPool = ScopedPoolImpl;
17 @trusted:
18 nothrow:
struct ScopedPoolImpl {
nothrow:
	// TODO: Use a name for debugging?

	// Id of the pool this handle pushed onto the thread-local PoolStack.
	// Used to enforce strict LIFO discipline in ~this(), freeze() and
	// unfreeze() via asserts.
	int id;
	/// Initializes a scoped pool with max_mem
	this(size_t max_mem) {
		PoolStack.push(max_mem);
		id = PoolStack.top.id;
	}

	/// Pushes an existing, externally created pool onto the thread stack
	/// and adopts its id.
	this(ManagedPool pool) {
		PoolStack.push(pool);
		id = PoolStack.top.id;
	}

	/// Pops the pool, releasing its memory and running registered
	/// destructors. Asserts that this scoped pool is still the stack top,
	/// i.e. scoped pools must be destroyed in reverse creation order.
	~this() {
		assert (id == PoolStack.top.id);
		PoolStack.pop();
	}

	/// Use only if ScopedPool is the highest on stack.
	void freeze() {
		assert(id == PoolStack.top.id);
		PoolStack.freeze(1);
	}

	/// Moves this pool back from the freezer onto the active stack; it must
	/// come back as the stack top (asserted).
	void unfreeze() {
		PoolStack.unfreeze(1);
		assert(id == PoolStack.top.id);
	}
}
51 /*
52 T alloc(T, ARGS...)(auto ref ARGS args)
53 	if (is(T == class) || is(T == interface) || __traits(isAbstractClass, T))
54 {
55 	T ret;
56 	
57 	if (!PoolStack.empty) {
58 		ret = ObjectAllocator!(T, PoolStack).alloc(args);
59 		
60 		// Add destructor to pool
61 		logTrace("Trying to add __dtor for ", T.stringof);
62 		static if (hasElaborateDestructor!T || __traits(hasMember, T, "__dtor") ) {
63 			PoolStack.top().onDestroy(&ret.__dtor);
64 		}
65 	}
66 	
67 	return ret;
68 }
69 */
/// Allocates and constructs a T on the current thread pool.
/// At CTFE the pool machinery is bypassed; at run time, returns null when
/// no pool is active. Struct destructors are queued on the owning pool.
T* alloc(T, ARGS...)(auto ref ARGS args)
{
	if (__ctfe) {
		assert(__ctfe);
		return ObjectAllocator!(T, PoolStack)().alloc(args);
	}

	// No active pool: nothing is allocated.
	if (PoolStack.empty)
		return null;

	T* obj = ObjectAllocator!(T, PoolStack)().alloc(args);

	// Queue the destructor so it runs when the owning pool is destroyed.
	static if (hasElaborateDestructor!T || __traits(hasMember, T, "__xdtor"))
		PoolStack.top.onDestroy(&((*obj).__xdtor));

	return obj;
}
90 
91 /// arrays
/// Allocates an n-element array of T's element type on the current pool.
/// Returns an empty array when no pool is active. Element destructors are
/// registered with the pool via registerPoolArray.
auto alloc(T)(size_t n)
	if (isArray!T)
{
	if (PoolStack.empty)
		return T.init;

	T slice = allocArray!(ElementType!T, PoolStack)(n);
	registerPoolArray(slice);
	return slice;
}
103 
/// Resizes a pool-owned array to n elements on the current pool.
///
/// Params:
///   arr = the existing pool-owned slice; nulled out on success so the
///         caller cannot keep using the stale memory.
///   n   = the new element count.
/// Returns: the resized array, or T.init when no pool is active.
auto realloc(T)(ref T arr, size_t n)
	if (isArray!T)
{
	T ret;
	if (!PoolStack.empty) {
		// scope(exit) fires when the block is left, i.e. after
		// reregisterPoolArray below has inspected the old slice.
		scope(exit) arr = null;
		ret = reallocArray!(ElementType!T, PoolStack)(arr, n);
		reregisterPoolArray(arr, ret);
	}
	// BUG FIX: the result was computed but never returned (the `auto`
	// return type inferred void), so the reallocated array was lost while
	// the caller's slice was still nulled out.
	return ret;
}
114 
/// Duplicates `arr` into memory owned by the current pool.
/// Returns an empty array when no pool is active — NOTE(review): callers
/// silently get no copy in that case; confirm this is intended.
auto copy(T)(auto ref T arr)
	if (isArray!T)
{
	if (__ctfe) {
		assert(__ctfe);
		// CTFE path: plain slice copy; cast() strips type qualifiers from
		// the result.
		return cast()cast(T)arr[0 .. $];
	} else {
		alias ElType = UnConst!(typeof(arr[0]));
		enum ElSize = ElType.sizeof;
		T arr_copy;
		if (!PoolStack.empty) {
			arr_copy = cast(T)allocArray!(ElementType!T, PoolStack)(arr.length);
			registerPoolArray(arr_copy);
			// Raw byte copy of the payload; memcpy is presumably brought in
			// via one of the memutils public imports — TODO confirm.
			memcpy(cast(void*)arr_copy.ptr, cast(void*)arr.ptr, arr.length * ElSize);
		}

		return arr_copy;
	}
}
134 
/// Static facade over the thread-local pool stack and its freezer.
struct PoolStack {
static:
nothrow:
	/// True when there is no active (unfrozen) pool on this thread.
	@property bool empty() { return m_tstack.empty; }

	/// Number of active pools on this thread.
	@property size_t length() { return m_tstack.length; }

	/// Resets both the active stack and the freezer to their initial state.
	void initialize() {
		m_tstack = ThreadPoolStack.init;
		m_tfreezer = ThreadPoolFreezer.init;
	}

	/// returns the most recent unfrozen pool, null if none available
	@property ManagedPool top() {
		return m_tstack.top;
	}

	/// Pushes an externally created pool onto the thread stack.
	void push(ManagedPool pool) {
		m_tstack.push(pool);
	}

	/// Creates a new pool (bounded by max_mem) as the thread stack top.
	void push(size_t max_mem = 0) {
		m_tstack.push(max_mem);
	}

	/// Destroys the most recent pool, freeing its resources and running any
	/// registered destructors. A no-op when the stack is empty.
	void pop() {
		if (!m_tstack.hasTop)
			return;
		m_tstack.pop();
	}

	/// Freezes every active pool, effectively disabling pool allocation.
	void disable() {
		freeze(m_tstack.length);
	}

	/// Thaws every frozen pool, re-enabling pool allocation.
	void enable() {
		unfreeze(m_tfreezer.length);
	}

	/// Moves up to n pools from the active stack into the freezer.
	/// Returns: the number of pools actually frozen.
	size_t freeze(size_t n = 1) {
		size_t count = m_tstack.length < n ? m_tstack.length : n;
		if (count == 0)
			return 0;
		auto batch = m_tstack.freeze(count);
		m_tfreezer.push(batch);
		return count;
	}

	/// Moves up to n pools from the freezer back onto the active stack.
	/// Returns: the number of pools actually unfrozen.
	size_t unfreeze(size_t n = 1) {
		size_t count = m_tfreezer.length < n ? m_tfreezer.length : n;
		if (count == 0)
			return 0;
		auto batch = m_tfreezer.pop(count);
		m_tstack.unfreeze(batch);
		return count;
	}

private static:
	// active
	__gshared ThreadPoolStack m_tstack;

	// frozen
	__gshared ThreadPoolFreezer m_tfreezer;

}
216 
217 alias ManagedPool = RefCounted!Pool;
218 
219 package:
220 
221 alias Pool = PoolAllocator!(AutoFreeListAllocator!(MallocAllocator));
222 
223 /// User utility for allocating on lower level pools
struct ThreadPoolFreezer 
{
nothrow:
	@disable this(this);

	/// Number of pools currently parked in the freezer.
	@property size_t length() const { return m_pools.length; }
	/// True when nothing is frozen.
	@property bool empty() const { return length == 0; }

	/// Merges `pools` into the freezer, keeping m_pools sorted by
	/// ascending pool id.
	void push(ref Vector!(ManagedPool, ThreadMem) pools)
	{
		foreach (ref incoming; pools[]) {
			size_t insert_at = size_t.max;
			foreach (size_t i, ref existing; m_pools[]) {
				if (incoming.id < existing.id) {
					insert_at = i;
					break;
				}
			}
			if (insert_at == size_t.max)
				m_pools ~= incoming;
			else
				m_pools.insertBefore(insert_at, incoming);
		}
	}

	/// Removes and returns the n most recently frozen pools — the tail of
	/// the id-sorted vector.
	Vector!(ManagedPool, ThreadMem) pop(size_t n) {
		assert(!empty);
		auto tail = Vector!(ManagedPool, ThreadMem)( m_pools[$-n .. $] );
		m_pools.length = m_pools.length - n;
		return tail.move;
	}
	
package:
	Vector!(ManagedPool, ThreadMem) m_pools;
}
263 
struct ThreadPoolStack
{
nothrow:
	@disable this(this);
	// Number of pools currently on the stack.
	@property size_t length() const { return m_pools.length; }
	@property bool empty() const { return length == 0; }
	size_t opDollar() const { return length; }
	@property bool hasTop() { return length > 0; }


	// Direct indexed access (0 = oldest pool).
	ManagedPool opIndex(size_t n) {
		logTrace("OpIndex[", n, "] in Thread Pool of ", length, " top: ", cnt, " id: ", m_pools[n].id);
		return m_pools[n];
	}

	// Most recently pushed pool. NOTE(review): when the stack is empty this
	// logs an error and returns a brand-new ManagedPool(0) sentinel that
	// callers cannot distinguish from a real pool — confirm this fallback
	// is intended.
	@property ManagedPool top() 
	{
		//logTrace("Front Thread Pool of ", length);
		if (empty) {
			logError("Empty PoolStack");
			//logTrace("Empty");
			return ManagedPool(0);
		}
		return m_pools.back;
	}

	// Drops the top pool; the ManagedPool refcount handles the actual
	// destruction once the last reference goes away.
	void pop()
	{
		//assert(!empty);
		//logTrace("Pop Thread Pool of ", length, " top: ", cnt, " back id: ", m_pools.back.id);
		//auto pool = m_pools.back;
		//assert(pool.id == cnt);
		m_pools.removeBack();
		//logTrace("Popped Thread Pool of ", length, " top: ", cnt, " back id: ", m_pools.length > 0 ? charFromInt[m_pools.back.id] : '?');
	}

	// Pushes an externally created pool.
	void push(ManagedPool pool) {
		// NOTE(review): this is a self-assignment no-op. Given that `cnt`
		// below is never written and ids drive the sorted freezer ordering
		// and ScopedPool's asserts, this was possibly meant to be
		// `pool.id = cnt++;` — confirm before changing.
		pool.id = *cast(int*)&pool.id;

		m_pools.pushBack(pool);
	}
	
	// Creates and pushes a fresh pool bounded by max_mem.
	void push(size_t max_mem = 0) {
		//if (!m_pools.empty) logTrace("Push Thread Pool of ", length, " top: ", cnt, " back id: ", m_pools.back.id);
		//else logTrace("Push Thread Pool of ", length, " top: ", cnt);
		ManagedPool pool = ManagedPool(max_mem);
		// NOTE(review): self-assignment no-op, same as in push(ManagedPool).
		pool.id = *cast(int*)&pool.id;
		m_pools.pushBack(pool);
		//logTrace("Pushed Thread Pool of ", length, " top: ", cnt, " back id: ", m_pools.back.id);
	}

	// Detaches the n topmost pools and returns them for the freezer.
	Vector!(ManagedPool, ThreadMem) freeze(size_t n) {
		//assert(!empty);
		//if (!m_pools.empty) logTrace("Freeze ", n, " in Thread Pool of ", length, " top: ", cnt);
		//else logTrace("Freeze ", n, " in Thread Pool of ", length, " top: ", cnt, " back id: ", m_pools.back.id);
		//assert(n <= length);
		Vector!(ManagedPool, ThreadMem) ret = Vector!(ManagedPool, ThreadMem)( m_pools[$-n .. $] );
		m_pools.length = (m_pools.length - n);
		//logTrace("Returning ", ret.length);
		//logTrace("Freezeed ", n, " in Thread Pool of ", length, " top: ", cnt, " back id: ", m_pools.length > 0 ? charFromInt[m_pools.back.id] : '?');
		return ret.move;
	}

	// Re-inserts previously frozen pools, keeping the stack sorted by id.
	void unfreeze(ref Vector!(ManagedPool, ThreadMem) pools) {
		//logTrace("Unfreeze ", pools.length, " in Thread Pool of ", length, " top: ", cnt, " back id: ", m_pools.length > 0 ? charFromInt[m_pools.back.id] : '?');
		// insert sorted
		foreach(ref item; pools[]) {
			bool found;
			foreach (size_t i, ref el; m_pools[]) {
				if (item.id < el.id) {
					m_pools.insertBefore(i, item);
					found = true;
					break;
				}
			}
			if (!found) m_pools ~= item;
		}
		logTrace("Unfreezed ", pools.length, " in Thread Pool of ", length, " top: ", cnt, " back id: ", m_pools.back.id);
	}

package:
	// NOTE(review): only ever read (in logTrace calls), never incremented —
	// see the id self-assignment notes in push().
	int cnt;
	Vector!(ManagedPool, ThreadMem) m_pools;
}
348 
349 
/// Registers each element's destructor of a pool-owned array with the
/// current pool, so they run when the pool is destroyed.
private void registerPoolArray(T)(ref T arr) {
	// BUG FIX: the guard previously tested `is(T == struct)`, but T is the
	// ARRAY type (every caller constrains with isArray!T), so the branch
	// was never instantiated and element destructors were silently skipped.
	static if (is(ElementType!T == struct) && (hasElaborateDestructor!(ElementType!T) || __traits(hasMember, ElementType!T, "__xdtor") )) {
		foreach (ref el; arr)
			PoolStack.top.onDestroy(&el.__xdtor);
	}
}
357 
/// Fixes up destructor registrations after a realloc: `arr` is the old
/// slice, `arr2` the resized one.
private void reregisterPoolArray(T)(ref T arr, ref T arr2) {
	logTrace("reregisterPoolArray");
	// BUG FIX: guard used `is(T == struct)` on the array type (always
	// false for the isArray!T callers), so this body was dead code.
	static if (is(ElementType!T == struct) && (hasElaborateDestructor!(ElementType!T) || __traits(hasMember, ElementType!T, "__xdtor") )) {
		if (arr.ptr is arr2.ptr && arr2.length > arr.length) {
			// Grown in place: only the newly added tail needs registering.
			// BUG FIX: the slice previously started at arr.length - 1,
			// re-registering the last old element's destructor and causing
			// a double destruction.
			foreach (ref el; arr2[arr.length .. $])
				PoolStack.top.onDestroy(&el.__xdtor);
		}
		else {
			// Relocated: drop the old registrations and re-register all of
			// the new slice's elements.
			PoolStack.top.removeArrayDtors(&arr.back.__xdtor, arr.length);
			registerPoolArray(arr2);
		}
	}
}