/*
 * \brief  Interface of AVL-tree-based allocator
 * \author Norman Feske
 * \date   2006-04-16
 */

/*
 * Copyright (C) 2006-2013 Genode Labs GmbH
 *
 * This file is part of the Genode OS framework, which is distributed
 * under the terms of the GNU General Public License version 2.
 */
#ifndef _INCLUDE__BASE__ALLOCATOR_AVL_H_
#define _INCLUDE__BASE__ALLOCATOR_AVL_H_

#include <base/allocator.h>
#include <base/tslab.h>
#include <util/avl_tree.h>
#include <util/misc_math.h>
namespace Genode {

	class Allocator_avl_base;

	/*
	 * The unnamed template argument is the block meta-data type
	 * (see 'Allocator_avl_tpl' below); SLAB_BLOCK_SIZE dimensions the
	 * slab blocks used by the meta-data allocator of 'Allocator_avl_tpl'.
	 */
	template <typename, unsigned SLAB_BLOCK_SIZE = 256*sizeof(addr_t)>
	class Allocator_avl_tpl;

	/**
	 * Define AVL-based allocator without any meta data attached to each block
	 */
	class Empty { };
	typedef Allocator_avl_tpl<Empty> Allocator_avl;
}
class Genode::Allocator_avl_base : public Range_allocator
{
	private:

		/**
		 * Return true if 'addr + offset' fits into the address space
		 * (i.e., the sum does not overflow)
		 */
		static bool _sum_in_range(addr_t addr, addr_t offset) {
			return (~0UL - addr > offset); }

	protected:

		class Block : public Avl_node<Block>
		{
			private:

				addr_t _addr;      /* base address */
				size_t _size;      /* size of block */
				bool   _used;      /* block is in use */
				short  _id;        /* for debugging */
				size_t _max_avail; /* biggest free block size of subtree */

				/**
				 * Request max_avail value of subtree
				 *
				 * Returns 0 for a missing child.
				 */
				inline size_t _child_max_avail(bool side) {
					return child(side) ? child(side)->max_avail() : 0; }

				/**
				 * Query if block can hold a specified subblock
				 *
				 * \param n      number of bytes
				 * \param from   minimum start address of subblock
				 * \param to     maximum end address of subblock
				 * \param align  alignment (power of two)
				 * \return       true if block fits
				 */
				inline bool _fits(size_t n, unsigned align,
				                  addr_t from, addr_t to)
				{
					/* align the candidate address within the block */
					addr_t a = align_addr(addr() < from ? from : addr(),
					                      align);

					/* candidate must lie in block, not overflow, and end
					 * within both the block and the [from,to] window */
					return (a >= addr()) && _sum_in_range(a, n) &&
					       (a - addr() + n <= avail()) && (a + n - 1 <= to);
				}

			public:

				/**
				 * Avl_node interface: compare two nodes
				 */
				bool higher(Block *a) {
					return a->_addr >= _addr; }

				/**
				 * Avl_node interface: update meta data on node rearrangement
				 */
				void recompute();


				/***************
				 ** Accessors **
				 ***************/

				inline int    id()        const { return _id; }
				inline addr_t addr()      const { return _addr; }
				inline size_t avail()     const { return _used ? 0 : _size; }
				inline size_t size()      const { return _size; }
				inline bool   used()      const { return _used; }
				inline size_t max_avail() const { return _max_avail; }

				inline void used(bool used) { _used = used; }


				enum { FREE = false, USED = true };


				/**
				 * Constructor
				 *
				 * This constructor is called from meta-data allocator during
				 * initialization of new meta-data blocks.
				 */
				Block() : _addr(0), _size(0), _used(false), _id(0),
				          _max_avail(0) { }

				/**
				 * Constructor
				 *
				 * Assigns a unique (process-global, monotonic) debugging ID
				 * to the block.
				 */
				Block(addr_t addr, size_t size, bool used)
				: _addr(addr), _size(size), _used(used),
				  _max_avail(used ? 0 : size)
				{
					static int num_blocks;
					_id = ++num_blocks;
				}

				/**
				 * Find best-fitting block
				 */
				Block *find_best_fit(size_t size, unsigned align = 1,
				                     addr_t from = 0UL, addr_t to = ~0UL);

				/**
				 * Find block that contains the specified address range
				 */
				Block *find_by_address(addr_t addr, size_t size = 0,
				                       bool check_overlap = false);

				/**
				 * Return sum of available memory in subtree
				 */
				size_t avail_in_subtree(void);

				/**
				 * Debug hook
				 *
				 * \noapi
				 */
				void dump();

				/**
				 * Debug hook
				 *
				 * \noapi
				 */
				void dump_dot(int indent = 0);
		};

	private:

		Avl_tree<Block> _addr_tree;      /* blocks sorted by base address */
		Allocator      *_md_alloc;       /* meta-data allocator */
		size_t          _md_entry_size;  /* size of block meta-data entry */

		/**
		 * Alloc meta-data block
		 */
		Block *_alloc_block_metadata();

		/**
		 * Alloc two meta-data blocks in a transactional way
		 */
		bool _alloc_two_blocks_metadata(Block **dst1, Block **dst2);

		/**
		 * Create new block
		 */
		int _add_block(Block *block_metadata,
		               addr_t base, size_t size, bool used);

		/**
		 * Destroy block
		 */
		void _destroy_block(Block *b);

		/**
		 * Cut specified area from block
		 *
		 * The original block gets replaced by (up to) two smaller blocks
		 * with remaining space.
		 */
		void _cut_from_block(Block *b, addr_t cut_addr, size_t cut_size,
		                     Block *dst1, Block *dst2);

	protected:

		/**
		 * Find block by specified address
		 */
		Block *_find_by_address(addr_t addr, size_t size = 0,
		                        bool check_overlap = false) const
		{
			Block *b = static_cast<Block *>(_addr_tree.first());

			/* if the tree has one or more nodes, start search */
			return b ? b->find_by_address(addr, size, check_overlap) : 0;
		}

		/**
		 * Constructor
		 *
		 * This constructor can only be called from a derived class that
		 * provides an allocator for block meta-data entries. This way,
		 * we can attach custom information to block meta data.
		 */
		Allocator_avl_base(Allocator *md_alloc, size_t md_entry_size) :
			_md_alloc(md_alloc), _md_entry_size(md_entry_size) { }

	public:

		/**
		 * Return address of any block of the allocator
		 *
		 * \param out_addr  result that contains address of block
		 * \return          true if block was found or
		 *                  false if there is no block available
		 *
		 * If no block was found, out_addr is set to zero.
		 */
		bool any_block_addr(addr_t *out_addr);

		/**
		 * Debug hook
		 *
		 * \noapi
		 */
		void dump_addr_tree(Block *addr_node = 0);


		/*******************************
		 ** Range allocator interface **
		 *******************************/

		int          add_range(addr_t base, size_t size) override;
		int          remove_range(addr_t base, size_t size) override;
		Alloc_return alloc_aligned(size_t size, void **out_addr, int align = 0,
		                           addr_t from = 0, addr_t to = ~0UL) override;
		Alloc_return alloc_addr(size_t size, addr_t addr) override;
		void         free(void *addr) override;
		size_t       avail() const override;
		bool         valid_addr(addr_t addr) const override;


		/*************************
		 ** Allocator interface **
		 *************************/

		bool alloc(size_t size, void **out_addr) override
		{
			return (Allocator_avl_base::alloc_aligned(size, out_addr).is_ok());
		}

		void free(void *addr, size_t) override { free(addr); }

		/**
		 * Return size of block at specified address
		 */
		size_t size_at(void const *addr) const;

		/**
		 * Return the memory overhead per Block
		 *
		 * The overhead is a rough estimation. If a block is somewhere
		 * in the middle of a free area, we could consider the meta data
		 * for the two free subareas when calculating the overhead.
		 *
		 * The 'sizeof(umword_t)' represents the overhead of the meta-data
		 * slab allocator.
		 */
		size_t overhead(size_t) const override {
			return sizeof(Block) + sizeof(umword_t); }

		bool need_size_for_free() const override { return false; }
};
/**
288
* AVL-based allocator with custom meta data attached to each block.
289
*
290
* \param BMDT block meta-data type
291
*/
292
template
<
typename
BMDT
,
unsigned
SLAB_BLOCK_SIZE
>
293
class
Genode::
Allocator_avl_tpl :
public
Allocator_avl_base
294
{
295
protected
:
296
297
/*
298
* Pump up the Block class with custom meta-data type
299
*/
300
class
Block :
public
Allocator_avl_base::
Block,
public
BMDT
{
}
;
301
302
Tslab<
Block
,
SLAB_BLOCK_SIZE
>
_metadata;
/* meta-data allocator */
303
char _initial_md_block[SLAB_BLOCK_SIZE]
;
/* first (static) meta-data block */
304
305
public
:
306
307
/**
308
* Constructor
309
*
310
* \param metadata_chunk_alloc pointer to allocator used to allocate
311
* meta-data blocks. If set to 0,
312
* use ourself for allocating our
313
* meta-data blocks. This works only
314
* if the managed memory is completely
315
* accessible by the allocator.
316
*/
317
explicit
Allocator_avl_tpl(
Allocator *
metadata_chunk_alloc
)
:
318
Allocator_avl_base(
&
_metadata,
sizeof
(
Block)
)
,
319
_metadata(
(
metadata_chunk_alloc)
?
metadata_chunk_alloc : this,
320
(
Slab_block *
)
&
_initial_md_block)
{
}
321
322
/**
323
* Assign custom meta data to block at specified address
324
*/
325
void
metadata(
void *
addr
,
BMDT
bmd
)
const
326
{
327
Block *
b =
static_cast
<
Block *
>
(
_find_by_address(
(
addr_t)
addr)
)
;
328
if
(
b)
*
static_cast
<
BMDT *
>
(
b)
=
bmd;
329
}
330
331
/**
332
* Return meta data that was attached to block at specified address
333
*/
334
BMDT*
metadata(
void *
addr
)
const
335
{
336
Block *
b =
static_cast
<
Block *
>
(
_find_by_address(
(
addr_t)
addr)
)
;
337
return
b &&
b->
used(
)
?
b : 0
;
338
}
339
340
int
add_range(
addr_t
base
,
size_t
size
)
341
{
342
/*
343
* We disable the slab block allocation while
344
* processing add_range to prevent avalanche
345
* effects when (slab trying to make an allocation
346
* at Allocator_avl that is empty).
347
*/
348
Allocator *
md_bs =
_metadata.
backing_store(
)
;
349
_metadata.
backing_store(
0)
;
350
int ret =
Allocator_avl_base::
add_range(
base,
size)
;
351
_metadata.
backing_store(
md_bs)
;
352
return
ret
;
353
}
354
}
;
355
356
#
endif /* _INCLUDE__BASE__ALLOCATOR_AVL_H_ */