/*
 * \brief  Heap partition
 * \author Norman Feske
 * \date   2006-05-15
 */

/*
 * Copyright (C) 2006-2013 Genode Labs GmbH
 *
 * This file is part of the Genode OS framework, which is distributed
 * under the terms of the GNU General Public License version 2.
 */
13
14
#ifndef _INCLUDE__BASE__HEAP_H_
#define _INCLUDE__BASE__HEAP_H_

#include <util/list.h>
#include <ram_session/ram_session.h>
#include <rm_session/rm_session.h>
#include <base/allocator_avl.h>
#include <base/lock.h>
23
/* forward declarations of the heap types defined below */
namespace Genode { class Heap; class Sliced_heap; }
28
29
30
/**
 * Heap that uses dataspaces as backing store
 *
 * The heap class provides an allocator that uses a list of dataspaces of a RAM
 * session as backing store. One dataspace may be used for holding multiple blocks.
 */
class Genode::Heap : public Allocator
{
	private:

		enum {
			MIN_CHUNK_SIZE =   4*1024,  /* in machine words */
			MAX_CHUNK_SIZE = 256*1024,  /* in machine words */

			/*
			 * Meta data includes the Dataspace structure and meta data of
			 * the AVL allocator.
			 */
			META_DATA_SIZE = 1024,  /* in bytes */

			/*
			 * Allocation sizes >= this value are considered as big
			 * allocations, which get their own dataspace. In contrast
			 * to smaller allocations, this memory is released to
			 * the RAM session when `free()` is called.
			 */
			BIG_ALLOCATION_THRESHOLD = 64*1024  /* in bytes */
		};

		/*
		 * One dataspace of the heap's backing store, linked into the
		 * 'Dataspace_pool' list below
		 */
		class Dataspace : public List<Dataspace>::Element
		{
			public:

				Ram_dataspace_capability cap;         /* capability of backing-store dataspace */
				void                    *local_addr;  /* address of the local mapping */
				size_t                   size;        /* dataspace size in bytes */

				Dataspace(Ram_dataspace_capability c, void *local_addr, size_t size)
				: cap(c), local_addr(local_addr), size(size) { }

				/*
				 * Placement new/delete: a 'Dataspace' object is constructed
				 * at a caller-supplied address (presumably within the
				 * heap-managed memory itself — confirm against heap.cc),
				 * so 'operator new' just returns the given address and
				 * 'operator delete' is a no-op.
				 */
				inline void *operator new(Genode::size_t, void* addr) {
					return addr; }

				inline void operator delete(void*) { }
		};

		/*
		 * This structure exists only to make sure that the dataspaces are
		 * destroyed after the AVL allocator.
		 */
		struct Dataspace_pool : public List<Dataspace>
		{
			Ram_session *ram_session;  /* RAM session for backing store */
			Rm_session  *rm_session;   /* region manager */

			Dataspace_pool(Ram_session *ram_session, Rm_session *rm_session)
			: ram_session(ram_session), rm_session(rm_session) { }

			/**
			 * Destructor
			 */
			~Dataspace_pool();

			/**
			 * Exchange the RAM and RM sessions used for the backing store
			 */
			void reassign_resources(Ram_session *ram, Rm_session *rm) {
				ram_session = ram, rm_session = rm; }
		};

		/*
		 * NOTE: The order of the member variables is important for
		 *       the calling order of the destructors!
		 */

		Lock           _lock;         /* serialize allocations */
		Dataspace_pool _ds_pool;      /* list of dataspaces */
		Allocator_avl  _alloc;        /* local allocator */
		size_t         _quota_limit;  /* maximum number of bytes to hand out */
		size_t         _quota_used;   /* number of bytes currently allocated */
		size_t         _chunk_size;   /* presumably the size of the next
		                                 backing-store chunk, bounded by
		                                 MIN/MAX_CHUNK_SIZE — see heap.cc */

		/**
		 * Allocate a new dataspace of the specified size
		 *
		 * \param size                       number of bytes to allocate
		 * \param enforce_separate_metadata  if true, the new dataspace
		 *                                   will not contain any meta data
		 * \throw  Rm_session::Invalid_dataspace,
		 *         Rm_session::Region_conflict
		 * \return pointer to the new dataspace, or 0 on failure
		 *         (NOTE(review): the original comment claimed "0 on success
		 *         or negative error code", which contradicts the pointer
		 *         return type — confirm against heap.cc)
		 */
		Heap::Dataspace *_allocate_dataspace(size_t size, bool enforce_separate_metadata);

		/**
		 * Try to allocate block at our local allocator
		 *
		 * \param size      number of bytes to allocate
		 * \param out_addr  start address of the block, valid only on success
		 * \return true on success
		 *
		 * This method is a utility used by `_unsynchronized_alloc` to
		 * avoid code duplication.
		 */
		bool _try_local_alloc(size_t size, void **out_addr);

		/**
		 * Unsynchronized implementation of `alloc`
		 *
		 * Presumably called by 'alloc' with '_lock' held — confirm
		 * against heap.cc.
		 */
		bool _unsynchronized_alloc(size_t size, void **out_addr);

	public:

		/*
		 * Quota-limit value that disables quota checking; '~0' converts to
		 * the maximum 'size_t' value when passed as 'quota_limit'.
		 */
		enum { UNLIMITED = ~0 };

		/**
		 * Constructor
		 *
		 * \param ram_session  RAM session providing the backing store
		 * \param rm_session   region-manager session used for attaching
		 *                     the backing-store dataspaces
		 * \param quota_limit  maximum number of bytes to allocate,
		 *                     unlimited by default
		 * \param static_addr  optional memory range served by the local
		 *                     allocator before any dataspace is used
		 * \param static_size  size of the static memory range in bytes
		 */
		Heap(Ram_session *ram_session,
		     Rm_session  *rm_session,
		     size_t       quota_limit = UNLIMITED,
		     void        *static_addr = 0,
		     size_t       static_size = 0)
		:
			_ds_pool(ram_session, rm_session),
			_alloc(0),
			_quota_limit(quota_limit), _quota_used(0),
			_chunk_size(MIN_CHUNK_SIZE)
		{
			/* register the static range so it is consumed first */
			if (static_addr)
				_alloc.add_range((addr_t)static_addr, static_size);
		}

		/**
		 * Reconfigure quota limit
		 *
		 * \return negative error code if the new quota limit is lower than
		 *         the currently used quota, 0 otherwise
		 *         (NOTE(review): the original comment said "higher than",
		 *         which contradicts the purpose of a limit — confirm
		 *         against heap.cc)
		 */
		int quota_limit(size_t new_quota_limit);

		/**
		 * Re-assign RAM and RM sessions
		 */
		void reassign_resources(Ram_session *ram, Rm_session *rm) {
			_ds_pool.reassign_resources(ram, rm); }


		/*************************
		 ** Allocator interface **
		 *************************/

		bool   alloc(size_t, void **) override;
		void   free(void *, size_t) override;
		size_t consumed() const override { return _quota_used; }
		size_t overhead(size_t size) const override { return _alloc.overhead(size); }
		bool   need_size_for_free() const override { return false; }
};
177
178
179
/**
180
* Heap that allocates each block at a separate dataspace
181
*/
182
class
Genode::
Sliced_heap :
public
Allocator
183
{
184
private
:
185
186
class
Block;
187
188
Ram_session *
_ram_session;
/* RAM session for backing store */
189
Rm_session *
_rm_session;
/* region manager */
190
size_t _consumed;
/* number of allocated bytes */
191
List<
Block
>
_block_list;
/* list of allocated blocks */
192
Lock _lock;
/* serialize allocations */
193
194
public
:
195
196
/**
197
* Constructor
198
*/
199
Sliced_heap(
Ram_session *
ram_session
,
Rm_session *
rm_session
)
;
200
201
/**
202
* Destructor
203
*/
204
~
Sliced_heap(
)
;
205
206
207
/*************************
208
** Allocator interface **
209
*************************/
210
211
bool
alloc(
size_t
,
void *
*
)
;
212
void
free(
void *
,
size_t
)
;
213
size_t
consumed(
)
const
{
return
_consumed
;
}
214
size_t
overhead(
size_t
size
)
const
;
215
bool
need_size_for_free(
)
const
override
{
return
false
;
}
216
}
;
217
218
#endif /* _INCLUDE__BASE__HEAP_H_ */