/*
 * Copyright (c) 2019 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <sys/sys_heap.h>
#include <sys/util.h>
#include <kernel.h>
#include "heap.h"

/* White-box sys_heap validation code. Uses internal data structures.
 * Not expected to be useful in production apps. This checks every
 * header field of every chunk and returns true if the totality of the
 * data structure is a valid heap. It doesn't necessarily tell you
 * that it is the CORRECT heap given the history of alloc/free calls
 * that it can't inspect. In a pathological case, you can imagine
 * something scribbling a copy of a previously-valid heap on top of a
 * running one and corrupting it. YMMV.
 */

#define VALIDATE(cond) do { if (!(cond)) { return false; } } while (0)

static bool in_bounds(struct z_heap *h, chunkid_t c)
{
	VALIDATE(c >= right_chunk(h, 0));
	VALIDATE(c < h->end_chunk);
	VALIDATE(chunk_size(h, c) < h->end_chunk);
	return true;
}

static bool valid_chunk(struct z_heap *h, chunkid_t c)
{
	VALIDATE(chunk_size(h, c) > 0);
	VALIDATE(c + chunk_size(h, c) <= h->end_chunk);
	VALIDATE(in_bounds(h, c));
	VALIDATE(right_chunk(h, left_chunk(h, c)) == c);
	VALIDATE(left_chunk(h, right_chunk(h, c)) == c);
	if (chunk_used(h, c)) {
		VALIDATE(!solo_free_header(h, c));
	} else {
		VALIDATE(chunk_used(h, left_chunk(h, c)));
		VALIDATE(chunk_used(h, right_chunk(h, c)));
		if (!solo_free_header(h, c)) {
			VALIDATE(in_bounds(h, prev_free_chunk(h, c)));
			VALIDATE(in_bounds(h, next_free_chunk(h, c)));
		}
	}
	return true;
}

/* Validate multiple state dimensions for the bucket "next" pointer
 * and see that they match. Probably should unify the design a
 * bit...
 */
static inline void check_nexts(struct z_heap *h, int bidx)
{
	struct z_heap_bucket *b = &h->buckets[bidx];

	bool emptybit = (h->avail_buckets & BIT(bidx)) == 0;
	bool emptylist = b->next == 0;
	bool empties_match = emptybit == emptylist;

	(void)empties_match;
	CHECK(empties_match);

	if (b->next != 0) {
		CHECK(valid_chunk(h, b->next));
	}
}

static void get_alloc_info(struct z_heap *h, size_t *alloc_bytes,
			   size_t *free_bytes)
{
	chunkid_t c;

	*alloc_bytes = 0;
	*free_bytes = 0;

	for (c = right_chunk(h, 0); c < h->end_chunk; c = right_chunk(h, c)) {
		if (chunk_used(h, c)) {
			*alloc_bytes += chunksz_to_bytes(h, chunk_size(h, c));
		} else if (!solo_free_header(h, c)) {
			*free_bytes += chunksz_to_bytes(h, chunk_size(h, c));
		}
	}
}

bool sys_heap_validate(struct sys_heap *heap)
{
	struct z_heap *h = heap->heap;
	chunkid_t c;

	/*
	 * Walk through the chunks linearly, verifying sizes and end pointer.
	 */
	for (c = right_chunk(h, 0); c < h->end_chunk; c = right_chunk(h, c)) {
		if (!valid_chunk(h, c)) {
			return false;
		}
	}
	if (c != h->end_chunk) {
		return false; /* Should have exactly consumed the buffer */
	}

#ifdef CONFIG_SYS_HEAP_RUNTIME_STATS
	/*
	 * Validate the sys_heap_runtime_stats_get() API: iterate over all
	 * chunks in the heap to compute the total allocated and free byte
	 * counts, then compare them with the values reported by
	 * sys_heap_runtime_stats_get().
	 */
	size_t allocated_bytes, free_bytes;
	struct sys_heap_runtime_stats stat;

	get_alloc_info(h, &allocated_bytes, &free_bytes);
	sys_heap_runtime_stats_get(heap, &stat);
	if ((stat.allocated_bytes != allocated_bytes) ||
	    (stat.free_bytes != free_bytes)) {
		return false;
	}
#endif

	/* Check the free lists: entry count should match, empty bit
	 * should be correct, and all chunk entries should point into
	 * valid unused chunks. Mark those chunks USED, temporarily.
	 */
	for (int b = 0; b <= bucket_idx(h, h->end_chunk); b++) {
		chunkid_t c0 = h->buckets[b].next;
		uint32_t n = 0;

		check_nexts(h, b);

		for (c = c0; c != 0 && (n == 0 || c != c0);
		     n++, c = next_free_chunk(h, c)) {
			if (!valid_chunk(h, c)) {
				return false;
			}
			set_chunk_used(h, c, true);
		}

		bool empty = (h->avail_buckets & BIT(b)) == 0;
		bool zero = n == 0;

		if (empty != zero) {
			return false;
		}

		if (empty && h->buckets[b].next != 0) {
			return false;
		}
	}

	/*
	 * Walk through the chunks linearly again, verifying that all chunks
	 * but solo headers are now USED (i.e. all free blocks were found
	 * during enumeration). Mark all such blocks UNUSED and solo headers
	 * USED.
	 */
	chunkid_t prev_chunk = 0;
	for (c = right_chunk(h, 0); c < h->end_chunk; c = right_chunk(h, c)) {
		if (!chunk_used(h, c) && !solo_free_header(h, c)) {
			return false;
		}
		if (left_chunk(h, c) != prev_chunk) {
			return false;
		}
		prev_chunk = c;

		set_chunk_used(h, c, solo_free_header(h, c));
	}
	if (c != h->end_chunk) {
		return false; /* Should have exactly consumed the buffer */
	}

	/* Go through the free lists again checking that the linear
	 * pass caught all the blocks and that they now show UNUSED.
	 * Mark them USED.
	 */
	for (int b = 0; b <= bucket_idx(h, h->end_chunk); b++) {
		chunkid_t c0 = h->buckets[b].next;
		int n = 0;

		if (c0 == 0) {
			continue;
		}

		for (c = c0; n == 0 || c != c0; n++, c = next_free_chunk(h, c)) {
			if (chunk_used(h, c)) {
				return false;
			}
			set_chunk_used(h, c, true);
		}
	}

	/* Now we are valid, but have managed to invert all the in-use
	 * fields. One more linear pass to fix them up.
	 */
	for (c = right_chunk(h, 0); c < h->end_chunk; c = right_chunk(h, c)) {
		set_chunk_used(h, c, !chunk_used(h, c));
	}
	return true;
}
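
/*
 * Illustrative sketch only (not part of the build): one way a unit test
 * might interleave heap operations with sys_heap_validate(). The function
 * name, heap size and allocation size below are hypothetical example
 * values, not anything mandated by this file.
 */
#if 0
static void validate_example(void)
{
	static char heap_mem[4096];
	static struct sys_heap heap;

	sys_heap_init(&heap, heap_mem, sizeof(heap_mem));

	void *p = sys_heap_alloc(&heap, 64);

	__ASSERT(sys_heap_validate(&heap), "heap invalid after alloc");
	sys_heap_free(&heap, p);
	__ASSERT(sys_heap_validate(&heap), "heap invalid after free");
}
#endif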

struct z_heap_stress_rec {
	void *(*alloc_fn)(void *arg, size_t bytes);
	void (*free_fn)(void *arg, void *p);
	void *arg;
	size_t total_bytes;
	struct z_heap_stress_block *blocks;
	size_t nblocks;
	size_t blocks_alloced;
	size_t bytes_alloced;
	uint32_t target_percent;
};

struct z_heap_stress_block {
	void *ptr;
	size_t sz;
};

/* Very simple LCRNG (from https://nuclear.llnl.gov/CNP/rng/rngman/node4.html)
 *
 * Here to guarantee cross-platform test repeatability.
 */
static uint32_t rand32(void)
{
	static uint64_t state = 123456789; /* seed */

	state = state * 2862933555777941757UL + 3037000493UL;

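	/*
	 * Return the upper half of the 64-bit state: the low-order bits of
	 * an LCG have short periods, so the top 32 bits are the
	 * statistically stronger ones.
	 */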
	return (uint32_t)(state >> 32);
}

static bool rand_alloc_choice(struct z_heap_stress_rec *sr)
{
	/* Edge cases: no blocks allocated, and no space for a new one */
	if (sr->blocks_alloced == 0) {
		return true;
	} else if (sr->blocks_alloced >= sr->nblocks) {
		return false;
	} else {
		/* The way this works is to scale the chance of choosing to
		 * allocate vs. free such that it's even odds when the heap is
		 * at the target percent, with linear tapering on the low
		 * slope (i.e. we choose to always allocate with an empty
		 * heap, allocate 50% of the time when the heap is exactly at
		 * the target, and always free when above the target). In
		 * practice, the operations aren't quite symmetric (you can
		 * always free, but your allocation might fail), and the units
		 * aren't matched (we're doing math based on bytes allocated
		 * and ignoring the overhead) but this is close enough. And
		 * yes, the math here is coarse (in units of percent), but
		 * that's good enough and fits well inside 32 bit quantities.
		 * (Note precision issue when heap size is above 40MB
		 * though!).
		 */
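		/*
		 * Worked example (illustrative numbers, not from this file):
		 * with target_percent = 50 and the heap 25% full, free_chance
		 * is about 25 * (0x80000000 / 50) ~= 0x40000000, i.e. roughly
		 * a quarter of the uint32_t range, so an allocation is chosen
		 * about 75% of the time -- halfway down the linear taper from
		 * "always allocate" at 0% full to 50/50 at the target.
		 */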
		__ASSERT(sr->total_bytes < 0xffffffffU / 100, "too big for u32!");
		uint32_t full_pct = (100 * sr->bytes_alloced) / sr->total_bytes;
		uint32_t target = sr->target_percent ? sr->target_percent : 1;
		uint32_t free_chance = 0xffffffffU;

		if (full_pct < sr->target_percent) {
			free_chance = full_pct * (0x80000000U / target);
		}

		return rand32() > free_chance;
	}
}

/* Chooses a size of block to allocate, logarithmically favoring
 * smaller blocks (i.e. blocks twice as large are half as frequent).
 */
static size_t rand_alloc_size(struct z_heap_stress_rec *sr)
{
	ARG_UNUSED(sr);

	/* The minimum scale of 4 means that half of the requests (those
	 * in the smallest size class) are uniform in [0, 15], i.e. have
	 * an average size of about 8.
	 */
	int scale = 4 + __builtin_clz(rand32());

	return rand32() & BIT_MASK(scale);
}

/* Returns the index of a randomly chosen block to free */
static size_t rand_free_choice(struct z_heap_stress_rec *sr)
{
	return rand32() % sr->blocks_alloced;
}

/* General purpose heap stress test. Takes function pointers to allow
 * for testing multiple heap APIs with the same rig. The alloc and
 * free functions are passed back the arg value as a context pointer.
 * The total_bytes argument should reflect the size of the heap being
 * tested. The scratch array is used to store temporary state and
 * should be sized about half as large as the heap itself. Counters
 * describing the run are returned through the result struct.
 */
void sys_heap_stress(void *(*alloc_fn)(void *arg, size_t bytes),
		     void (*free_fn)(void *arg, void *p),
		     void *arg, size_t total_bytes,
		     uint32_t op_count,
		     void *scratch_mem, size_t scratch_bytes,
		     int target_percent,
		     struct z_heap_stress_result *result)
{
	struct z_heap_stress_rec sr = {
		.alloc_fn = alloc_fn,
		.free_fn = free_fn,
		.arg = arg,
		.total_bytes = total_bytes,
		.blocks = scratch_mem,
		.nblocks = scratch_bytes / sizeof(struct z_heap_stress_block),
		.target_percent = target_percent,
	};

	*result = (struct z_heap_stress_result) {0};

	for (uint32_t i = 0; i < op_count; i++) {
		if (rand_alloc_choice(&sr)) {
			size_t sz = rand_alloc_size(&sr);
			void *p = sr.alloc_fn(sr.arg, sz);

			result->total_allocs++;
			if (p != NULL) {
				result->successful_allocs++;
				sr.blocks[sr.blocks_alloced].ptr = p;
				sr.blocks[sr.blocks_alloced].sz = sz;
				sr.blocks_alloced++;
				sr.bytes_alloced += sz;
			}
		} else {
			int b = rand_free_choice(&sr);
			void *p = sr.blocks[b].ptr;
			size_t sz = sr.blocks[b].sz;

			result->total_frees++;
			sr.blocks[b] = sr.blocks[sr.blocks_alloced - 1];
			sr.blocks_alloced--;
			sr.bytes_alloced -= sz;
			sr.free_fn(sr.arg, p);
		}
		result->accumulated_in_use_bytes += sr.bytes_alloced;
	}
}
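
/*
 * Illustrative sketch only (not part of the build): a hypothetical way to
 * drive sys_heap_stress() against a sys_heap. The wrapper and example
 * function names, buffer sizes, iteration count and target fill level are
 * assumptions made for the example, not values required by this file.
 */
#if 0
static void *stress_alloc_wrapper(void *arg, size_t bytes)
{
	return sys_heap_alloc(arg, bytes);
}

static void stress_free_wrapper(void *arg, void *p)
{
	sys_heap_free(arg, p);
}

static void stress_example(void)
{
	static char heap_mem[16384];
	static char scratch[8192];	/* about half the heap, as suggested above */
	static struct sys_heap heap;
	struct z_heap_stress_result result;

	sys_heap_init(&heap, heap_mem, sizeof(heap_mem));
	sys_heap_stress(stress_alloc_wrapper, stress_free_wrapper, &heap,
			sizeof(heap_mem), 10000, scratch, sizeof(scratch),
			50, &result);

	__ASSERT(sys_heap_validate(&heap), "heap invalid after stress");
	printk("%u/%u allocs succeeded, %u frees\n",
	       result.successful_allocs, result.total_allocs,
	       result.total_frees);
}
#endif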

/*
 * Print heap info for debugging / analysis purposes
 */
void heap_print_info(struct z_heap *h, bool dump_chunks)
{
	int i, nb_buckets = bucket_idx(h, h->end_chunk) + 1;
	size_t free_bytes, allocated_bytes, total, overhead;

	printk("Heap at %p contains %d units in %d buckets\n\n",
	       chunk_buf(h), h->end_chunk, nb_buckets);

	printk("  bucket#    min units        total      largest      largest\n"
	       "            threshold       chunks      (units)      (bytes)\n"
	       "  -----------------------------------------------------------\n");
	for (i = 0; i < nb_buckets; i++) {
		chunkid_t first = h->buckets[i].next;
		chunksz_t largest = 0;
		int count = 0;

		if (first) {
			chunkid_t curr = first;
			do {
				count++;
				largest = MAX(largest, chunk_size(h, curr));
				curr = next_free_chunk(h, curr);
			} while (curr != first);
		}
		if (count) {
			printk("%9d %12d %12d %12d %12zd\n",
			       i, (1 << i) - 1 + min_chunk_size(h), count,
			       largest, chunksz_to_bytes(h, largest));
		}
	}

	if (dump_chunks) {
		printk("\nChunk dump:\n");
		for (chunkid_t c = 0; ; c = right_chunk(h, c)) {
			printk("chunk %4d: [%c] size=%-4d left=%-4d right=%d\n",
			       c,
			       chunk_used(h, c) ? '*'
			       : solo_free_header(h, c) ? '.'
			       : '-',
			       chunk_size(h, c),
			       left_chunk(h, c),
			       right_chunk(h, c));
			if (c == h->end_chunk) {
				break;
			}
		}
	}

	get_alloc_info(h, &allocated_bytes, &free_bytes);
	/* The end marker chunk has a header. It is part of the overhead. */
	total = h->end_chunk * CHUNK_UNIT + chunk_header_bytes(h);
	overhead = total - free_bytes - allocated_bytes;
	printk("\n%zd free bytes, %zd allocated bytes, overhead = %zd bytes (%zd.%zd%%)\n",
	       free_bytes, allocated_bytes, overhead,
	       (1000 * overhead + total/2) / total / 10,
	       (1000 * overhead + total/2) / total % 10);
}

void sys_heap_print_info(struct sys_heap *heap, bool dump_chunks)
{
	heap_print_info(heap->heap, dump_chunks);
}

#ifdef CONFIG_SYS_HEAP_RUNTIME_STATS

int sys_heap_runtime_stats_get(struct sys_heap *heap,
			       struct sys_heap_runtime_stats *stats)
{
	if ((heap == NULL) || (stats == NULL)) {
		return -EINVAL;
	}

	stats->free_bytes = heap->heap->free_bytes;
	stats->allocated_bytes = heap->heap->allocated_bytes;
	stats->max_allocated_bytes = heap->heap->max_allocated_bytes;

	return 0;
}

int sys_heap_runtime_stats_reset_max(struct sys_heap *heap)
{
	if (heap == NULL) {
		return -EINVAL;
	}

	heap->heap->max_allocated_bytes = heap->heap->allocated_bytes;

	return 0;
}
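
/*
 * Illustrative sketch only (not part of the build): reading the runtime
 * statistics and resetting the high-water mark. The function name and
 * calling context are hypothetical.
 */
#if 0
static void stats_example(struct sys_heap *heap)
{
	struct sys_heap_runtime_stats stats;

	if (sys_heap_runtime_stats_get(heap, &stats) == 0) {
		printk("allocated=%zu free=%zu max=%zu\n",
		       stats.allocated_bytes, stats.free_bytes,
		       stats.max_allocated_bytes);
	}

	/* Start a fresh peak-usage measurement from the current level */
	(void)sys_heap_runtime_stats_reset_max(heap);
}
#endif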

#endif
|