#if defined(__AVR__)
/* Indicate that we are ISR safe. */
#define __ISR_SAFE_MALLOC__ 1
/* Copyright (c) 2002, 2004, 2010 Joerg Wunsch
   Copyright (c) 2010 Gerben van den Broeke
   All rights reserved.

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are met:

   * Redistributions of source code must retain the above copyright
     notice, this list of conditions and the following disclaimer.

   * Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the following disclaimer in
     the documentation and/or other materials provided with the
     distribution.

   * Neither the name of the copyright holders nor the names of
     contributors may be used to endorse or promote products derived
     from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.
*/
/* $Id: malloc.c 2149 2010-06-09 20:45:37Z joerg_wunsch $ */
#include <stdlib.h>
#include <string.h>
#include "sectionname.h"
#include "stdlib_private.h"
#include <avr/io.h>
#include <util/atomic.h>
#define XATOMIC_BLOCK(type) type, __ToDo; for (__ToDo = __iCliRetVal(); \
                                               __ToDo ; __ToDo = 0 )
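/*
 * Rough sketch of the expansion (assuming the stock <util/atomic.h>
 * definitions of ATOMIC_RESTORESTATE and __iCliRetVal()):
 *
 *   XATOMIC_BLOCK(ATOMIC_RESTORESTATE) { ... }
 *
 * becomes, approximately,
 *
 *   uint8_t sreg_save __attribute__((__cleanup__(__iRestore))) = SREG, __ToDo;
 *   for (__ToDo = __iCliRetVal(); __ToDo; __ToDo = 0) { ... }
 *
 * i.e. interrupts are disabled for the duration of the block, and SREG is
 * restored when the enclosing scope exits.  Unlike the stock ATOMIC_BLOCK,
 * the declarations live outside the for statement.
 */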
/*
 * Exported interface:
 *
 * When extending the data segment, the allocator will not try to go
 * beyond the current stack limit, decreased by __malloc_margin bytes.
 * Thus, all possible stack frames of interrupt routines that could
 * interrupt the current function, plus all further nested function
 * calls, must not require more stack space, or they risk colliding
 * with the data segment.
 */
/* May be changed by the user only before the first malloc() call. */
size_t __malloc_margin = 128;
char *__malloc_heap_start = &__heap_start;
char *__malloc_heap_end = &__heap_end;
char *__brkval;
struct __freelist *__flp;
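/*
 * Usage sketch (illustrative, with made-up numbers): the tunables above
 * may be set by the application, but only before the first malloc() call,
 * e.g. to leave more stack headroom or to cap the heap:
 *
 *   int main(void) {
 *     __malloc_margin = 256;                        // more room for nested ISRs
 *     __malloc_heap_end = (char *)(RAMEND - 0x3FF); // hypothetical 1 KiB cap
 *     char *buf = malloc(32);
 *     ...
 *   }
 */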
static
void *
_malloc(size_t len) {
  struct __freelist *fp1, *fp2, *sfp1, *sfp2;
  char *cp;
  size_t s, avail;
  /*
   * Our minimum chunk size is the size of a pointer (plus the
   * size of the "sz" field, but we don't need to account for
   * this), otherwise we could not possibly fit a freelist entry
   * into the chunk later.
   */
  if(len < sizeof(struct __freelist) - sizeof(size_t))
    len = sizeof(struct __freelist) - sizeof(size_t);
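  /*
   * For orientation, the chunk layout assumed throughout this file
   * (struct __freelist is declared in stdlib_private.h):
   *
   *   allocated chunk:  [ size_t sz | payload ... ]
   *   free chunk:       [ size_t sz | struct __freelist *nx | ... ]
   *
   * The caller is handed &(fp->nx), i.e. the first payload byte, so the
   * sz field always sits sizeof(size_t) bytes below the user pointer.
   */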
  /*
   * Step 1: Walk the free list and try to find a chunk that
   * matches the request exactly.  If we find one, we are done.
   * While walking, note down the smallest chunk seen that would
   * still fit the request -- we need it for step 2.
   */
  for(s = 0, fp1 = __flp, fp2 = 0;
      fp1;
      fp2 = fp1, fp1 = fp1->nx) {
    if(fp1->sz < len)
      continue;
    if(fp1->sz == len) {
      /*
       * Found it.  Disconnect the chunk from the
       * freelist, and return it.
       */
      if(fp2)
        fp2->nx = fp1->nx;
      else
        __flp = fp1->nx;
      return &(fp1->nx);
    } else {
      if(s == 0 || fp1->sz < s) {
        /* This is the smallest chunk found so far. */
        s = fp1->sz;
        sfp1 = fp1;
        sfp2 = fp2;
      }
    }
  }
  /*
   * Step 2: If we found a chunk on the freelist that would fit
   * (but was too large), look it up again and use it, since it
   * is our closest match now.  Since the freelist entry needs
   * to be split into two entries then, make sure the difference
   * between the requested size and the size of the chunk found
   * is large enough for another freelist entry; if not, just
   * enlarge the request size to what we have found, and use the
   * entire chunk.
   */
  if(s) {
    if(s - len < sizeof(struct __freelist)) {
      /* Disconnect it from freelist and return it. */
      if(sfp2)
        sfp2->nx = sfp1->nx;
      else
        __flp = sfp1->nx;
      return &(sfp1->nx);
    }
    /*
     * Split them up.  Note that we leave the first part
     * as the new (smaller) freelist entry, and return the
     * upper portion to the caller.  This saves us the
     * work of fixing up the freelist chain; we just need
     * to fix up the size of the current entry, and note
     * down the size of the new chunk before returning it
     * to the caller.
     */
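    /*
     * Sketch of the split (on AVR, sizeof(size_t) == 2):
     *
     *   before:  [ sz = s           | free ......................... ]
     *   after:   [ sz = s - len - 2 | free ... ][ sz = len | returned ]
     *
     * sfp1 keeps the lower, shrunken freelist entry; sfp2 is carved out
     * of its upper end and handed to the caller.
     */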
    cp = (char *) sfp1;
    s -= len;
    cp += s;
    sfp2 = (struct __freelist *) cp;
    sfp2->sz = len;
    sfp1->sz = s - sizeof(size_t);
    return &(sfp2->nx);
  }
  /*
   * Step 3: If the request could not be satisfied from a
   * freelist entry, just prepare a new chunk.  This means we
   * need to obtain more memory first.  The lowest address not
   * yet handed out is remembered in the __brkval variable.
   * Under Unix, the "break value" was the end of the data
   * segment as dynamically requested from the operating system.
   * Since we don't have an operating system, just make sure
   * that we don't collide with the stack.
   */
  if(__brkval == NULL)
    __brkval = __malloc_heap_start;
  cp = __malloc_heap_end;
  if(cp == 0)
    cp = STACK_POINTER() - __malloc_margin;
  if(cp <= __brkval)
    /*
     * Memory exhausted.
     */
    return NULL;
  avail = cp - __brkval;
  /*
   * Both tests below are needed to catch the case len >= 0xfffe:
   * there, len + sizeof(size_t) wraps around to a small value
   * (e.g. 0xfffe + 2 == 0 in 16-bit arithmetic), so testing the
   * sum alone would falsely succeed.
   */
  if(avail >= len && avail >= len + sizeof(size_t)) {
    fp1 = (struct __freelist *) __brkval;
    __brkval += len + sizeof(size_t);
    fp1->sz = len;
    return &(fp1->nx);
  }
  /*
   * Step 4: There's no help, just fail. :-/
   */
  return NULL;
}
static
void
_free(void *p) {
  struct __freelist *fp1, *fp2, *fpnew;
  char *cp1, *cp2, *cpnew;
  /* ISO C says free(NULL) must be a no-op. */
  if(p == NULL)
    return;
  if((char *)p < __malloc_heap_start)
    return; // Don't free, out of range.
  cpnew = p;
  cpnew -= sizeof(size_t);
  fpnew = (struct __freelist *) cpnew;
  fpnew->nx = NULL;
  /*
   * Trivial case first: if there's no freelist yet, our entry
   * will be the only one on it.  If this is the last entry, we
   * can reduce __brkval instead.
   */
  if(__flp == NULL) {
    if((char *) p + fpnew->sz == __brkval)
      __brkval = cpnew;
    else
      __flp = fpnew;
    return;
  }
  /*
   * Now, find the position where our new entry belongs on the
   * freelist.  Try to aggregate the chunk with adjacent chunks
   * if possible.
   */
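  /*
   * Coalescing sketch: when the chunk being freed (B) sits between two
   * chunks that are already free (A, C), both merges below fire and the
   * three become one entry:
   *
   *   [ A free ][ B freed ][ C free ]  ->  [ A+B+C free ]
   *
   * The upper neighbour (C) is absorbed inside the loop, the lower one
   * (A) after it.
   */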
  for(fp1 = __flp, fp2 = NULL;
      fp1;
      fp2 = fp1, fp1 = fp1->nx) {
    if(fp1 < fpnew)
      continue;
    cp1 = (char *) fp1;
    fpnew->nx = fp1;
    if((char *) &(fpnew->nx) + fpnew->sz == cp1) {
      /* upper chunk adjacent, assimilate it */
      fpnew->sz += fp1->sz + sizeof(size_t);
      fpnew->nx = fp1->nx;
    }
    if(fp2 == NULL) {
      /* new head of freelist */
      __flp = fpnew;
      return;
    }
    break;
  }
  /*
   * Note that we get here either if we hit the "break" above,
   * or if we fell off the end of the loop.  The latter means
   * we've got a new topmost chunk.  Either way, try aggregating
   * with the lower chunk if possible.
   */
  fp2->nx = fpnew;
  cp2 = (char *) &(fp2->nx);
  if(cp2 + fp2->sz == cpnew) {
    /* lower chunk adjacent, merge */
    fp2->sz += fpnew->sz + sizeof(size_t);
    fp2->nx = fpnew->nx;
  }
  /*
   * If there's a new topmost chunk, lower __brkval instead.
   */
  for(fp1 = __flp, fp2 = NULL;
      fp1->nx != 0;
      fp2 = fp1, fp1 = fp1->nx)
    /* advance to entry just before end of list */;
  cp2 = (char *) &(fp1->nx);
  if(cp2 + fp1->sz == __brkval) {
    if(fp2 == NULL)
      /* Freelist is empty now. */
      __flp = NULL;
    else
      fp2->nx = NULL;
    __brkval = cp2 - sizeof(size_t);
  }
}
static
void *
_realloc(void *ptr, size_t len) {
  struct __freelist *fp1, *fp2, *fp3, *ofp3;
  char *cp, *cp1;
  void *memp;
  size_t s, incr;
  /*
   * Number of bytes to carry over if the region has to move.  For a
   * pointer outside the managed heap the true old size is unknown, so
   * len is used as a best effort; for in-range pointers it is replaced
   * with the recorded chunk size just before the move_it label below.
   */
  size_t oldsz = len;
  /* Trivial case, required by C standard. */
  if(ptr == NULL)
    return _malloc(len);
  if((char *)ptr < __malloc_heap_start)
    goto move_it; // Out of heap range: don't extend in place, and _free() will ignore it.
  cp1 = (char *) ptr;
  cp1 -= sizeof(size_t);
  fp1 = (struct __freelist *) cp1;
  cp = (char *) ptr + len; /* new next pointer */
  if(cp < cp1)
    /* Pointer wrapped across top of RAM, fail. */
    return NULL;
  /*
   * See whether we are growing or shrinking.  When shrinking,
   * we split off a chunk for the released portion, and call
   * free() on it.  Therefore, we can only shrink if the new
   * size is at least sizeof(struct __freelist) smaller than the
   * previous size.
   */
  if(len <= fp1->sz) {
    /* The first test catches a possible unsigned int
     * rollover condition. */
    if(fp1->sz <= sizeof(struct __freelist) ||
       len > fp1->sz - sizeof(struct __freelist))
      return ptr;
    fp2 = (struct __freelist *) cp;
    fp2->sz = fp1->sz - len - sizeof(size_t);
    fp1->sz = len;
    _free(&(fp2->nx));
    return ptr;
  }
  /*
   * If we get here, we are growing.  First, see whether there
   * is space in the free list on top of our current chunk.
   */
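  /*
   * In-place growth sketch: this succeeds when the freelist entry that
   * immediately follows our chunk covers the needed increase:
   *
   *   [ sz | data ][ sz' | free ... ]  ->  [ len | data ............ ]
   *
   * with a remainder freelist entry split off when enough of the free
   * chunk is left over.
   */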
  incr = len - fp1->sz;
  cp = (char *) ptr + fp1->sz;
  fp2 = (struct __freelist *) cp;
  for(s = 0, ofp3 = NULL, fp3 = __flp;
      fp3;
      ofp3 = fp3, fp3 = fp3->nx) {
    if(fp3 == fp2 && fp3->sz + sizeof(size_t) >= incr) {
      /* found something that fits */
      if(fp3->sz + sizeof(size_t) - incr > sizeof(struct __freelist)) {
        /* split off a new freelist entry */
        cp = (char *) ptr + len;
        fp2 = (struct __freelist *) cp;
        fp2->nx = fp3->nx;
        fp2->sz = fp3->sz - incr;
        fp1->sz = len;
      } else {
        /* it just fits, so use it entirely */
        fp1->sz += fp3->sz + sizeof(size_t);
        fp2 = fp3->nx;
      }
      if(ofp3)
        ofp3->nx = fp2;
      else
        __flp = fp2;
      return ptr;
    }
    /*
     * Find the largest chunk on the freelist while
     * walking it.
     */
    if(fp3->sz > s)
      s = fp3->sz;
  }
  /*
   * If we are the topmost chunk in memory, and there was no
   * large enough chunk on the freelist that could be re-used
   * (by a call to malloc() below), quickly extend the
   * allocation area if possible, without the need to copy the
   * old data.
   */
  if(__brkval == (char *) ptr + fp1->sz && len > s) {
    cp1 = __malloc_heap_end;
    cp = (char *) ptr + len;
    if(cp1 == 0)
      cp1 = STACK_POINTER() - __malloc_margin;
    if(cp < cp1) {
      __brkval = cp;
      fp1->sz = len;
      return ptr;
    }
    /* If that failed, we are out of luck. */
    return NULL;
  }
  /*
   * Call malloc() for a new chunk, then copy over the data, and
   * release the old region.
   */
  oldsz = fp1->sz;
move_it:
  if((memp = _malloc(len)) == NULL)
    return NULL;
  memcpy(memp, ptr, oldsz);
  _free(ptr);
  return memp;
}
/* Thread/IRQ/task safe wrappers. */
ATTRIBUTE_CLIB_SECTION
void *
malloc(size_t len) {
  register void *p;
  XATOMIC_BLOCK(ATOMIC_RESTORESTATE) {
    p = _malloc(len);
  }
  return p;
}

ATTRIBUTE_CLIB_SECTION
void
free(void *p) {
  XATOMIC_BLOCK(ATOMIC_RESTORESTATE) {
    _free(p);
  }
}

ATTRIBUTE_CLIB_SECTION
void *
realloc(void *ptr, size_t len) {
  register void *p;
  XATOMIC_BLOCK(ATOMIC_RESTORESTATE) {
    p = _realloc(ptr, len);
  }
  return p;
}
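/*
 * Because each wrapper runs the real allocator inside an atomic block,
 * calling it from interrupt context is safe, which is what the
 * __ISR_SAFE_MALLOC__ flag at the top of this file advertises.
 * Illustrative sketch (the vector name and pending_buf are hypothetical):
 *
 *   #include <avr/interrupt.h>
 *
 *   static char *volatile pending_buf;  // hypothetical, allocated elsewhere
 *
 *   ISR(TCA0_OVF_vect) {                // hypothetical vector name
 *     free(pending_buf);                // safe even if main() is inside malloc()
 *     pending_buf = NULL;
 *   }
 */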
#endif