path: root/libstd/alloc.myr
blob: b77b90efe8eeaf243362af051d110f224bfcc424
use "die.use"
use "extremum.use"
use "sys.use"
use "types.use"
use "units.use"

/*
The allocator implementation here is based on Bonwick's slab allocator.

For small allocations (up to Bktmax), it works by requesting large,
power of two aligned slabs from the operating system, and breaking
them into a linked list of equal sized chunks. Allocations are then
satisfied by taking the head of the free chunk list. Slabs with no
free chunks are unlinked from the bucket's slab list.

The data structure looks something like this:
   Bkts:
	[16  byte] -> [slab hdr | chunk -> chunk -> chunk] -> [slab hdr | chunk -> chunk -> chunk]
	[32  byte] -> Zslab
	[64  byte] -> [slab hdr | chunk -> chunk]
	...
	[32k byte] -> ...

Large allocations are simply satisfied by mmap().

*/
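/*
To make the sizes concrete (illustrative arithmetic only, assuming
a 64 bit target where sizeof(slab) is 32 bytes): a 1 MiB slab
serving the 16 byte bucket is split into (1 MiB - 32)/16 = 65534
chunks, while a slab serving the largest 32 KiB bucket holds 31.
*/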

pkg std =
	generic alloc	: (		-> @a#)
	generic zalloc	: (		-> @a#)
	generic free	: (v:@a#	-> void)

	generic slalloc	: (len : size	-> @a[:])
	generic slzalloc	: (len : size	-> @a[:])
	generic slgrow	: (sl : @a[:], len : size	-> @a[:])
	generic slzgrow	: (sl : @a[:], len : size	-> @a[:])
	generic slfree	: (sl : @a[:]	-> void)

	const bytealloc	: (sz:size	-> byte#)
	const zbytealloc	: (sz:size	-> byte#)
	const bytefree	: (m:byte#, sz:size	-> void)


;;
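
/*
A minimal usage sketch of the interface above, as seen from a
hypothetical caller outside this module (illustrative only; not
part of this file):

	var p : int#
	var sl : byte[:]

	p = std.alloc()
	p# = 42
	std.free(p)

	sl = std.slzalloc(128)
	sl = std.slzgrow(sl, 256)
	std.slfree(sl)

alloc() and free() serve single values from the small buckets;
the sl* functions manage slices with a hidden slheader in front.
*/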
extern const put : (b : byte[:], args : ... -> size)

/* null pointers. only used internally. */
const Zbyteptr	= 0 castto(byte#)
const Zslab	= 0 castto(slab#)
const Zchunk	= 0 castto(chunk#)

const Slabsz 	= 1*MiB	/* 1 meg slabs */
const Cachemax	= 16	/* maximum number of slabs in the cache */
const Bktmax	= 32*KiB	/* Slabsz/32; a balance. */
const Pagesz	= 4*KiB
const Align	= 16	/* minimum allocation alignment */

var buckets	: bucket[32] /* excessive */
var initdone	: int
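
/*
With Align = 16 and Bktmax = 32*KiB, only 12 of these buckets (16,
32, 64, ..., 32768 bytes) are ever initialized; see bytealloc().
*/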

type slheader = struct
	cap	: size	/* capacity in bytes */
	magic	: size	/* magic check value */
;;

type bucket = struct
	sz	: size	/* aligned size */
	nper	: size	/* max number of elements per slab */
	slabs	: slab#	/* partially filled or free slabs */
	cache	: slab# /* cache of empty slabs, to prevent thrashing */
	ncache	: size  /* size of cache */
;;

type slab = struct
	head	: byte#	/* head of virtual addresses, so we don't leak address space */
	next	: slab#	/* the next slab on the chain */
	freehd	: chunk#	/* the nodes we're allocating */
	nfree	: size	/* the number of free nodes */
;;

type chunk = struct	/* NB: stored in the free memory itself, so it must fit in the smallest bucket (and be smaller than sizeof(slab)) */
	next	: chunk#	/* the next chunk in the free list */
;;

/* Allocates an object of type @a, returning a pointer to it. */
generic alloc = {-> @a#
	-> bytealloc(sizeof(@a)) castto(@a#)
}

generic zalloc = {-> @a#
	-> zbytealloc(sizeof(@a)) castto(@a#)
}

/* Frees a value of type @a */
generic free = {v:@a# -> void
	bytefree(v castto(byte#), sizeof(@a))
}

/* allocates a slice of 'len' elements. */
generic slalloc = {len
	var p, sz

	if len == 0
		-> [][:]
	;;
	sz = len*sizeof(@a) + align(sizeof(slheader), Align)
	p = bytealloc(sz)
	p = inithdr(p, sz)
	-> (p castto(@a#))[0:len]
}

generic slzalloc = {len
	var p, sz

	if len == 0
		-> [][:]
	;;
	sz = len*sizeof(@a) + align(sizeof(slheader), Align)
	p = zbytealloc(sz)
	p = inithdr(p, sz)
	-> (p castto(@a#))[0:len]
}

const inithdr = {p, sz
	var phdr, prest

	phdr = p castto(slheader#)
	phdr.cap = allocsz(sz) - align(sizeof(slheader), Align)
	phdr.magic = (0xdeadbeefbadf00d castto(size))

	prest = (p castto(size)) + align(sizeof(slheader), Align)
	-> prest castto(byte#)
}
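
/*
The resulting layout for slice allocations looks like this
(illustrative; on a 64 bit target, align(sizeof(slheader), Align)
is exactly 16 bytes):

	base                base+16
	v                   v
	[ cap     | magic  ][ elem 0 | elem 1 | ... ]

inithdr() hands back a pointer to the element region; checkhdr(),
slcap(), and slfree() step back by the same aligned header size to
recover the slheader.
*/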

const checkhdr = {p
	var phdr, addr

	addr = p castto(size)
	addr -= align(sizeof(slheader), Align)
	phdr = addr castto(slheader#)
	assert(phdr.magic == (0xdeadbeefbadf00d castto(size)), "corrupt memory\n")
}

/* Frees a slice */
generic slfree	 = {sl
	var head

	if sl.len == 0
		->
	;;

	checkhdr(sl castto(byte#))
	head = (sl castto(byte#)) castto(size)
	head -= align(sizeof(slheader), Align)
	/* the blob we free spans the aligned header as well as the capacity */
	bytefree(head castto(byte#), slcap(sl castto(byte#)) + align(sizeof(slheader), Align))
}

/* Grows a slice */
generic slgrow = {sl : @a[:], len
	var i, n
	var new

	/* if the slice doesn't need a bigger bucket, we don't need to realloc. */
	if sl.len > 0 && slcap(sl castto(byte#)) >= allocsz(len*sizeof(@a))
		-> (sl castto(@a#))[:len]
	;;

	new = slalloc(len)
	n = min(len, sl.len)
	for i = 0; i < n; i++
		new[i] = sl[i]
	;;
	if sl.len > 0
		slfree(sl)
	;;
	-> new
}
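
/*
By my reading of the capacity check above, it is conservative: it
compares the stored capacity against the rounded-up bucket size of
the new length, not the raw byte count. For example, a 30 byte
slice lands in the 64 byte bucket with cap = 48; growing it to 32
bytes reuses the allocation, but growing it to 48 bytes reallocates
even though 48 bytes would still fit, since allocsz(48) = 64 > 48.
*/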

/* Grows a slice, filling new entries with zero bytes */
generic slzgrow = {sl : @a[:], len
	var oldsz

	oldsz = sl.len*sizeof(@a)
	sl = slgrow(sl, len)
	/* only a newly grown tail needs zeroing; when shrinking there is nothing to fill */
	if oldsz < len*sizeof(@a)
		zfill((sl castto(byte#))[oldsz:len*sizeof(@a)])
	;;
	-> sl
}

const slcap = {p
	var phdr

	phdr = ((p castto(size)) - align(sizeof(slheader), Align)) castto(slheader#)
	-> phdr.cap
}

const zbytealloc = {sz
	var p

	p = bytealloc(sz)
	zfill(p[0:sz])
	-> p
}

const zfill = {sl
	var i

	for i = 0; i < sl.len; i++
		sl[i] = 0
	;;
}

/* Allocates a blob that is 'sz' bytes long. Dies if the allocation fails */
const bytealloc = {sz
	var i, bkt, p

	if !initdone
		for i = 0; i < buckets.len && (Align << i) <= Bktmax; i++
			bktinit(&buckets[i], Align << i)
		;;
		initdone = 1
	;;

	if (sz <= Bktmax)
		bkt = &buckets[bktnum(sz)]
		p = bktalloc(bkt)
	else
		p = mmap(Zbyteptr, sz, Mprotrw, Mpriv | Manon, -1, 0)
		if p == Mapbad
			die("could not map memory\n")
		;;
	;;
	-> p
}
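
/*
Note that the lazy bucket initialization above, like the rest of
this allocator, takes no locks; as written it assumes a single
threaded caller or external synchronization.
*/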

/* Frees a blob that is 'sz' bytes long. 'sz' must match the size the blob was allocated with. */
const bytefree = {p, sz
	var bkt

	if (sz <= Bktmax)
		bkt = &buckets[bktnum(sz)]
		bktfree(bkt, p)
	else
		munmap(p, sz)
	;;
}

/* Sets up a single empty bucket */
const bktinit = {b, sz
	b.sz = align(sz, Align)
	b.nper = (Slabsz - align(sizeof(slab), Align))/b.sz	/* chunks start past the aligned slab header */
	b.slabs = Zslab
	b.cache = Zslab
	b.ncache = 0
}

/* Creates a slab for bucket 'bkt', and fills the chunk list */
const mkslab = {bkt
	var i, p, s
	var b, bnext
	var off /* offset of chunk head */

	if bkt.ncache > 0
		/* a cached slab is fully free and already chunked up; reuse it */
		s = bkt.cache
		bkt.cache = s.next
		bkt.ncache--
		s.next = Zslab
		-> s
	;;
	/* tricky: we need power of two alignment, so we allocate double the
	   needed size, use only the aligned Slabsz in the middle, and waste
	   the address space at the ends. Since the OS backs anonymous
	   mappings with physical pages lazily, the untouched slop shouldn't
	   actually cost us memory, and 64 bits of address space means that
	   we're not going to run out of address space for a while. On a 32
	   bit system this would be a bad idea. */
	p = mmap(Zbyteptr, Slabsz*2, Mprotrw, Mpriv | Manon, -1, 0)
	if p == Mapbad
		die("Unable to mmap")
	;;

	s = align(p castto(size), Slabsz) castto(slab#)
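	/* e.g., with hypothetical addresses: if mmap() returns 0x10f2000,
	   then align(0x10f2000, 0x100000) = 0x1100000, so the header lands
	   on a Slabsz boundary and mtrunc() can recover it from any chunk
	   address inside the slab. */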
	s.head = p
	s.nfree = bkt.nper
	/* skip past the slab header */
	off = align(sizeof(slab), Align)
	bnext = nextchunk(s castto(chunk#), off)
	s.freehd = bnext
	for i = 0; i < bkt.nper; i++
		b = bnext
		bnext = nextchunk(b, bkt.sz)
		b.next = bnext
	;;
	b.next = Zchunk
	-> s
}

/* 
Allocates a node from bucket 'bkt', crashing if the
allocation cannot be satisfied. Will create a new slab
if there are no slabs on the freelist.
*/
const bktalloc = {bkt
	var s
	var b

	/* find a slab */
	s = bkt.slabs
	if s == Zslab
		s = mkslab(bkt)
		if s == Zslab
			die("No memory left")
		;;
		bkt.slabs = s
	;;

	/* grab the first chunk on the slab */
	b = s.freehd
	s.freehd = b.next
	s.nfree--
	if !s.nfree
		bkt.slabs = s.next
		s.next = Zslab
	;;

	-> b castto(byte#)
}
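
/*
Both the slab lookup and the chunk pop above are constant time; the
only non-constant cost on the allocation path is the occasional
mkslab() call, amortized over bkt.nper allocations.
*/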

/*
Frees a chunk of memory 'm' into bucket 'bkt'.
Assumes that the memory already came from a slab
that was created for bucket 'bkt'. Will crash
if this is not the case.
*/
const bktfree = {bkt, m
	var s, b, prev

	s = mtrunc(m, Slabsz) castto(slab#)
	b = m castto(chunk#)
	if s.nfree == 0
		/* the slab was full, and was unlinked in bktalloc(); relink it */
		s.next = bkt.slabs
		bkt.slabs = s
	elif s.nfree == bkt.nper - 1
		/* this free makes the slab fully free; unlink it from the slab list */
		if bkt.slabs == s
			bkt.slabs = s.next
		else
			prev = bkt.slabs
			while prev.next != s
				prev = prev.next
			;;
			prev.next = s.next
		;;
		if bkt.ncache < Cachemax
			s.next = bkt.cache
			bkt.cache = s
			bkt.ncache++
		else
			/* we mapped 2*Slabsz so we could align it,
			   so we need to unmap the same. the slab is
			   gone after this; don't touch it again. */
			munmap(s.head, Slabsz*2)
			->
		;;
	;;
	s.nfree++
	b.next = s.freehd
	s.freehd = b
}

/*
Finds the correct bucket index to allocate from
for allocations of size 'sz'
*/
const bktnum = {sz
	var i, bktsz

	bktsz = Align
	for i = 0; bktsz <= Bktmax; i++
		if bktsz >= sz
			-> i
		;;
		bktsz *= 2
	;;
	die("Size does not match any buckets")
}
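
/*
For example, bktnum(40) walks the sizes 16, 32, 64 and returns
index 2, since 64 is the first bucket size >= 40; bktnum(16)
returns index 0.
*/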

/*
returns the actual size we allocated for a given
size request
*/
const allocsz = {sz
	var i, bktsz

	if sz <= Bktmax
		bktsz = Align
		for i = 0; bktsz <= Bktmax; i++
			if bktsz >= sz
				-> bktsz
			;;
			bktsz *= 2
		;;
	else
		-> align(sz, Pagesz)
	;;
}
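
/*
For example, allocsz(40) = 64, rounding up to the matching bucket,
while allocsz(40*KiB) exceeds Bktmax and stays 40*KiB, since that
is already a multiple of Pagesz.
*/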

/*
aligns a size to a requested alignment.
'align' must be a power of two
*/
const align = {v, align
	-> (v + align - 1) & ~(align - 1)
}
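
/* e.g., align(13, 16) = (13 + 15) & ~15 = 16, and align(32, 16) = 32 */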

/*
chunks are variable sizes, so we can't just
index to get to the next one
*/
const nextchunk = {b, sz : size
	-> ((b castto(intptr)) + (sz castto(intptr))) castto(chunk#)
}

/*
truncates a pointer to 'align'. 'align' must
be a power of two.
*/
const mtrunc = {m, align
	-> ((m castto(intptr)) & ~((align castto(intptr)) - 1)) castto(byte#)
}