1 /**
2 	Memory pool with destructors, useful for scoped allocators.
3 
4     Copyright: © 2012-2013 RejectedSoftware e.K.
5     		   © 2014-2015 Etienne Cimon
6     License: Subject to the terms of the MIT license.
7     Authors: Sönke Ludwig, Etienne Cimon
8 */
9 module memutils.pool;
10 
11 import memutils.allocators;
12 import memutils.vector;
13 import memutils.helpers;
14 import memutils.constants;
15 import memutils.utils;
16 
struct PoolAllocator(Base)
{
nothrow:
@trusted:
	public int id = -1; // intrusive ID used for ScopedPools

	// One pool: `data` is the whole backing allocation, `remaining` the
	// still-unused tail of `data`.
	static align(8) struct Pool { Pool* next; void[] data; void[] remaining; }

	private {
		Base* m_baseAllocator;       // lazily fetched on first alloc()
		Pool* m_freePools;           // pools that still have room
		Pool* m_fullPools;           // pools with no remaining space
		Vector!(void delegate() nothrow @trusted, ThreadMem) m_destructors;
		int m_pools;                 // total pool count across both lists
	}
	public size_t m_poolSize = 64*1024;

	/// `pool_size == 0` keeps the 64 KiB default.
	this(size_t pool_size)
	{
		if (pool_size > 0) {
			logTrace("PoolAllocator.this() with ", pool_size);
			m_poolSize = pool_size;
		}
		
	}

	/// Returns a freshly carved slice of `sz` bytes (zeroed unless
	/// `must_zeroise` is false). Allocates a new pool from the base
	/// allocator when no free pool can satisfy the aligned request.
	void[] alloc(size_t sz, bool must_zeroise = true)
	{
		if (!m_baseAllocator) {
			logInfo("Loading pool allocator with base ", Base.stringof);
			m_baseAllocator = getAllocator!Base();
			if (m_poolSize == 0) m_poolSize = 64*1024;
		}
		
		auto aligned_sz = alignedSize(sz);

		// first-fit scan of the free list
		Pool* pprev = null;
		Pool* p = cast(Pool*)m_freePools;
		size_t i;
		while(i < m_pools && p && p.remaining.length < aligned_sz ) {
			pprev = p;
			p = p.next;
			i++;
		}
		
		if( !p || p.remaining.length == 0 || p.remaining.length < aligned_sz ) {
			// no pool fits: create one sized for at least this request
			auto pmem = m_baseAllocator.alloc(AllocSize!Pool);
			p = cast(Pool*)pmem;
			logInfo("Allocating m_poolSize ", m_poolSize);
			p.data = m_baseAllocator.alloc(max(aligned_sz, m_poolSize));
			p.remaining = p.data;
			p.next = cast(Pool*)m_freePools;
			m_freePools = p;
			m_pools++;
			pprev = null;
		}
		auto ret = p.remaining[0 .. aligned_sz];
		p.remaining = p.remaining[aligned_sz .. $];
		if( !p.remaining.length ){
			// pool exhausted: unlink from the free list, push onto the full list
			if( pprev ) {
				pprev.next = p.next;
			} else {
				m_freePools = p.next;
			}
			p.next = cast(Pool*)m_fullPools;
			m_fullPools = p;
		}
		if (must_zeroise) memset(ret.ptr, 0, ret.length);
		return ret[0 .. sz];
	}
	
	/// Resizes `arr` to `newsize`. Extends in place when `arr` is the most
	/// recent carve of the head free pool and there is room; otherwise
	/// falls back to alloc+copy (the old block is zeroed on request since
	/// it cannot be returned to the pool).
	void[] realloc(void[] arr, size_t newsize, bool must_zeroise = true)
	{
		auto aligned_sz = alignedSize(arr.length);
		auto aligned_newsz = alignedSize(newsize);
		// shrinking (or same aligned bucket): adjust the slice in place
		if( aligned_newsz <= aligned_sz ) return arr.ptr[0 .. newsize];
		
		auto pool = m_freePools;
		bool last_in_pool = pool && arr.ptr+aligned_sz == pool.remaining.ptr;
		if( last_in_pool && pool.remaining.length+aligned_sz >= aligned_newsz ) {
			pool.remaining = pool.remaining[aligned_newsz-aligned_sz .. $];
			// BUGFIX: remember the old length before extending `arr`; the
			// previous code computed `newsize - arr.length` *after* the
			// extension, which underflowed size_t whenever `newsize` was
			// not a multiple of the alignment and memset'd out of bounds.
			size_t old_len = arr.length;
			arr = arr.ptr[0 .. aligned_newsz];
			assert(arr.ptr+arr.length == pool.remaining.ptr, "Last block does not align with the remaining space!?");
			if (must_zeroise) memset(arr.ptr + old_len, 0, aligned_newsz - old_len);
			return arr[0 .. newsize];
		} else {
			// forward the caller's zeroise preference to the fresh block
			auto ret = alloc(newsize, must_zeroise);
			assert(ret.ptr >= arr.ptr+aligned_sz || ret.ptr+ret.length <= arr.ptr, "New block overlaps old one!?");
			memcpy(ret.ptr, arr.ptr, min(arr.length, newsize));
			if (must_zeroise) memset(arr.ptr, 0, arr.length);
			return ret;
		}
	}
	
	/// Individual frees are no-ops for a pool allocator; the memory is
	/// reclaimed wholesale by freeAll()/reset(). Optionally scrubs `mem`.
	void free(void[] mem, bool must_zeroise = false)
	{
		if (must_zeroise) memset(mem.ptr, 0, mem.length);
	}
	
	/// Runs all registered destructors, then marks every pool empty again
	/// (pool memory itself is kept for reuse; reset() releases it).
	void freeAll(bool must_zeroise = false)
	{
		logDebug("Calling ", m_destructors.length, " dtors, Destroy pools: ", m_pools);
		// destroy all initialized objects, newest first
		if (m_destructors.length > 0) {
			foreach_reverse (ref dtor; m_destructors[]) {
				logTrace("Dtor is null? ", dtor ? 'n' : 'y');
				dtor();
			}
				
			logTrace("Called dtor(), destroying array");

			destructRecurse(m_destructors);
			m_destructors.length = 0;
		}

		size_t i;
		// splice all full Pools into the free pools list
		for (Pool* p = cast(Pool*)m_fullPools, pnext; p && i < m_pools; (p = pnext), i++) {
			// zeroise full pools, maybe redundant for Vector and Hashmap
			if (must_zeroise && p.data.length > 0) memset(p.data.ptr, 0, p.data.length);
			pnext = p.next;
			p.next = cast(Pool*)m_freePools;
			m_freePools = cast(Pool*)p;
		}
		// BUGFIX: the full list head was never cleared, leaving stale
		// pointers whose `next` links now thread into the free list; a
		// later alloc() pushing onto m_fullPools would entangle the two
		// lists and the size accessors would double-count.
		m_fullPools = null;
		i=0;
		// mark every pool completely unused again
		for (Pool* p = cast(Pool*)m_freePools; p && i < m_pools; (p = p.next), i++) {
			p.remaining = p.data;
		}
	}
	
	/// Releases every pool back to the base allocator (after running
	/// destructors via freeAll). The allocator is reusable afterwards.
	void reset(bool must_zeroise = false)
	{
		freeAll(must_zeroise);
		Pool* pnext;
		size_t i;
		for (auto p = cast(Pool*)m_freePools; p && i < m_pools; (p = pnext), i++) {
			pnext = p.next;
			m_baseAllocator.free(p.data, must_zeroise);
			m_baseAllocator.free((cast(void*)p)[0 .. AllocSize!Pool], must_zeroise);
		}
		m_freePools = null;
		m_fullPools = null;
		// BUGFIX: the pool count was left stale after all pools were
		// released, skewing the `i < m_pools` guards on the next cycle.
		m_pools = 0;
		
	}

	/// Registers a destructor to run (in reverse order) on freeAll/reset.
	package void onDestroy(void delegate() nothrow @trusted dtor) {
		logTrace("Adding to onDestroy null ? ", dtor ? 'n' : 'y', " dtors length: ", m_destructors.length);
		m_destructors ~= dtor;
	}

	/// Removes the `n` destructors registered for an array whose last
	/// element's dtor is `last_dtor` (searched from the back).
	package void removeArrayDtors(void delegate() nothrow @trusted last_dtor, size_t n) {
		bool found;
		logTrace("removeArrayDtors");
		foreach_reverse(i, ref el; m_destructors[]) {
			if (el == last_dtor)
			{
				Vector!(void delegate() nothrow @trusted) arr;
				// NOTE(review): `n >= i` with `i-n+1` underflows size_t when
				// n > i+1; presumably callers guarantee n <= i+1 (the array's
				// dtors occupy [i-n+1 .. i]) — confirm against call sites.
				if (n >= i)
					arr ~= m_destructors[0 .. i-n+1];
				if (i != m_destructors.length - 1)
					arr ~= m_destructors[i+1 .. $];
				m_destructors[] = arr[];
				found = true;
				break;
			}
		}
		assert(found);
	}

	/// Total bytes held in pool backing buffers (used and unused).
	@property size_t totalSize()
	{
		size_t amt = 0;
		size_t i;
		for (auto p = m_fullPools; p && i < m_pools; (p = p.next), i++)
			amt += p.data.length;
		i=0;
		for (auto p = m_freePools; p && i < m_pools; (p = p.next), i++)
			amt += p.data.length;
		return amt;
	}
	
	/// Bytes currently carved out of the pools (full pools count whole).
	@property size_t allocatedSize()
	{
		size_t amt = 0;
		size_t i;
		for (auto p = m_fullPools; p && i < m_pools; (p = p.next), i++)
			amt += p.data.length;
		i = 0;
		for (auto p = m_freePools; p && i < m_pools; (p = p.next), i++)
			amt += p.data.length - p.remaining.length;
		return amt;
	}

	~this() { reset(); }
}