source: svn/trunk/newcon3bcm2_21bu/magnum/basemodules/mem/bmem_priv.h @ 3

Last change on this file since 3 was 2, checked in by phkim, 11 years ago

1.phkim

  1. revision copy newcon3sk r27
  • Property svn:executable set to *
File size: 28.6 KB
Line 
1/***************************************************************************
2 *     Copyright (c) 2001-2012, Broadcom Corporation
3 *     All Rights Reserved
4 *     Confidential Property of Broadcom Corporation
5 *
6 *  THIS SOFTWARE MAY ONLY BE USED SUBJECT TO AN EXECUTED SOFTWARE LICENSE
7 *  AGREEMENT  BETWEEN THE USER AND BROADCOM.  YOU HAVE NO RIGHT TO USE OR
8 *  EXPLOIT THIS MATERIAL EXCEPT SUBJECT TO THE TERMS OF SUCH AN AGREEMENT.
9 *
10 * $brcm_Workfile: bmem_priv.h $
11 * $brcm_Revision: Hydra_Software_Devel/35 $
12 * $brcm_Date: 2/7/12 6:37p $
13 *
14 * Module Description:
15 *
16 * Revision History:
17 *
18 * $brcm_Log: /magnum/basemodules/mem/bmem_priv.h $
19 *
20 * Hydra_Software_Devel/35   2/7/12 6:37p albertl
21 * SW7425-2345: Changed total allocation and number of allocation tracking
22 * to work in eFastest.
23 *
24 * Hydra_Software_Devel/34   1/31/12 1:56p erickson
25 * SW7346-636: BMEM_P_Heap_CheckCachedAddress must test that pheap-
26 * >pvCache is non-NULL
27 *
28 * Hydra_Software_Devel/33   9/9/11 7:10p albertl
29 * SW7346-201, SW7405-5350: Moved watermark calculation to alloc and free
30 * for accurate calculation.  Changed BMEM_CONFIG_SAFE to not use
31 * BMEM_FREE_CAREFULLY.
32 *
33 * Hydra_Software_Devel/32   9/8/11 4:22p albertl
34 * SW7405-5350: Changed block info to be doublely linked.  Implemented
35 * faster pbi removal and assigned slower removal and error checking to
36 * BMEM_FREE_CAREFULLY.
37 *
38 * Hydra_Software_Devel/31   4/25/11 11:37a tdo
39 * SW7422-12: Fix default alignment for 7422/7425
40 *
41 * Hydra_Software_Devel/30   4/12/11 6:11p albertl
42 * SW7425-333: Added smart detection of cached address to
43 * BMEM_Heap_ConvertAddress_ToOffset.
44 *
45 * Hydra_Software_Devel/29   3/28/11 5:12p albertl
46 * SW7425-247: Incorporated BDBG_OBJECT handle validation.
47 *
48 * Hydra_Software_Devel/28   1/10/11 3:32p albertl
49 * SW7408-193: BMEM_Heapinfo now includes high watermark.  Added
50 * BMEM_Heap_ResetHighWatermark().
51 *
52 * Hydra_Software_Devel/27   8/17/10 5:58p nickh
53 * SW7422-12: Add default alignment for 7422
54 *
55 * Hydra_Software_Devel/26   3/26/10 5:14p albertl
56 * SW7405-3979: Changed filename field from string to pointer and removed
57 * slow copy operation.
58 *
59 * Hydra_Software_Devel/25   5/22/09 5:30p albertl
60 * PR55389: Replaced uintptr_t with uint32_t.
61 *
62 * Hydra_Software_Devel/24   5/15/09 8:42a gmohile
63 * PR 25109 : define uint32_t for 7401/03
64 *
65 * Hydra_Software_Devel/23   2/25/09 4:48p erickson
66 * PR52471: added const keyword to global data
67 *
68 * Hydra_Software_Devel/22   2/9/09 4:12p tdo
69 * PR50391: Correct default alignment setting for 7420
70 *
71 * Hydra_Software_Devel/21   8/6/08 11:30a agin
72 * PR43531:  Must use 128 byte alignment for 7325.
73 *
74 * Hydra_Software_Devel/20   7/23/08 2:31p albertl
75 * PR43531:  Added 7325 and 7335 to list of chips using default alignment
76 * of 2.
77 *
78 * Hydra_Software_Devel/19   6/11/08 3:41p albertl
79 * PR43531:  Changed to use default alignment of 6 for all new chips, and
80 * 2 for old chips.
81 *
82 * Hydra_Software_Devel/18   7/6/07 6:53p albertl
83 * PR31066:  Changed BMEM_HEAP_ALIGNMENT to 6 for 7400 B0 and later and
84 * 7405.  Added warnings when user specified alignment is less than and
85 * forced to BMEM_HEAP_ALIGNMENT.
86 *
87 * Hydra_Software_Devel/17   5/14/07 1:20p albertl
88 * PR30621:  Fixed heaps being added to allocation list but not being
89 * removed when freed in some safety configurations.
90 *
91 * Hydra_Software_Devel/16   3/15/07 7:23p albertl
92 * PR28682:  Changed BMEM_GUARD_SIZE_BYTES to be dynamically calculated
93 * based on runtime safety config.
94 *
95 * Hydra_Software_Devel/15   10/12/06 6:54p albertl
96 * PR20247:  Fixed all heap safety config field checks to use new
97 * pSafetyConfigInfo structure.
98 *
99 * Hydra_Software_Devel/14   6/21/06 3:11p albertl
100 * PR20247:  Moved safety config table to bmem.c  BMEM_P_Heap now uses a
101 * BMEM_P_SafetyConfigInfo pointer to track safety configuration settings
102 * instead of tracking each configuration separately.
103 *
104 * Hydra_Software_Devel/13   6/16/06 3:23p albertl
105 * PR20247, PR20276, PR20354:  Added the ability to control debug
106 * configuration at runtime.  Added address and offset checking to
107 * conversion functions.  BMEM_SetCache can now only be called before
108 * heaps are allocated from.  Added BMEM_Heap functions.
109 *
110 * Hydra_Software_Devel/12   10/7/05 3:53p jasonh
111 * PR 17374: Allowed GetHeapInfo to return original creation parameters.
112 *
113 * Hydra_Software_Devel/11   3/10/05 5:05p albertl
114 * PR13677:  Both local and system bookkeeping made available at heap
115 * creation though functions BMEM_CreateHeapSystem and
116 * BMEM_CreateHeapLocal.
117 *
118 * Hydra_Software_Devel/10   11/2/04 10:32a hongtaoz
119 * PR13076: added cached memory support;
120 *
121 * Hydra_Software_Devel/9   5/26/04 7:01p hongtaoz
122 * PR10059: sorted freed heap in size ascending order to avoid memory
123 * fragamentation;
124 *
125 * Hydra_Software_Devel/8   4/1/04 1:48p pntruong
126 * PR10201: Added memory monitor support.
127 *
128 * Hydra_Software_Devel/7   9/15/03 5:22p jasonh
129 * Fixed bookkeeping size for local allocations. Added offset and address
130 * values to private heap structure. Renamed semaphore macros.
131 *
132 * Hydra_Software_Devel/6   9/5/03 1:40p jasonh
133 * Inserted configuration values from bmem_config.h. Removed unnecessary
134 * prototypes.
135 *
136 * Hydra_Software_Devel/5   9/4/03 7:18p jasonh
137 * Commented out unnecessary function.
138 *
139 * Hydra_Software_Devel/4   9/2/03 1:47p vadim
140 * Moved macro BRCM_UNUSED from bmem.h
141 *
142 * Hydra_Software_Devel/3   9/2/03 1:39p vadim
143 * Some magnum updates.
144 *
145 * Hydra_Software_Devel/2   3/20/03 3:51p erickson
146 * renamed all MEM_ to BMEM_
147 *
148 * Hydra_Software_Devel/1   3/20/03 3:24p erickson
149 * initial bmem work, taken from SetTop/memorylib
150 *
151 * SanJose_DVTSW_Devel\6   4/18/02 11:29a poz
152 * Made the pMutex member conditionally compiled.
153 *
154 * SanJose_DVTSW_Devel\5   4/15/02 3:18p ngibbs
155 * Fixed a typo for BMEM_NOT_REENTRANT mode.
156 *
157 * SanJose_DVTSW_Devel\4   4/15/02 2:27p poz
158 * Use BMEM_REENTRANT_CONFIG instead of BMEM_REENTRANT.
159 *
160 * SanJose_DVTSW_Devel\3   4/15/02 1:27p poz
161 * Added prototypes for GetAddres, GetBlockInfo, and DropBlockInfo.
162 * Added macro implmentation of these functions for the local memory heap
163 * manager.
164 * Added pvData member for implementation-specific data.
165 * Updated comments, copyright and keywords in header.
166 *
167 ***************************************************************************/
168#ifndef BMEM_PRIV_H__
169#define BMEM_PRIV_H__
170
171#ifndef BMEM_CONFIG_H__
172#include "bmem_config.h"  /* for BMEM_GUARD_SIZE_BYTES, BMEM_FILENAME_LENGTH */
173#endif
174
175#if (BMEM_REENTRANT_CONFIG == BMEM_REENTRANT)
176#include "bkni_multi.h" /* for semaphores */
177#endif
178
179#ifdef __cplusplus
180extern "C" {
181#endif
182
183/**************************************************************************
184 * The macros below allow configuration of typical defaults. There should
185 * be no need to change them, but they're pulled out here just in case.
186 **************************************************************************/
187
/*
 * BMEM_MIN_ALLOCATION_SIZE (modifiable)
 *
 * When splitting up a free block for allocation, any left-overs smaller than
 * BMEM_MIN_BLOCK_SIZE plus the bookkeeping size will become part of the
 * allocated block instead of made into a free block. This may seem
 * wasteful, but it reduces overall heap fragmentation.
 *
 * If this value is set to 0, then all left-overs large enough to be a free
 * or allocated block will be split up as individual blocks (even if it
 * can only hold a zero size allocation).
 *
 * The default value is 128 bytes.
 */
#define BMEM_MIN_ALLOCATION_SIZE  128
203
/*
 * BMEM_GUARD_BYTE (modifiable)
 *
 * Guard bytes are set to this value when a block is created. In addition to
 * the formal guard areas, the back scrap and front scrap are filled with
 * this value. The default is 0xac (for "All Clear").
 */
#define BMEM_GUARD_BYTE      0xac
212
/*
 * BMEM_CLEAR_BYTE (modifiable)
 *
 * When a block is freed, the data bytes are set to this value, so that
 * stale data cannot be silently used after the free.
 *
 * By default, the value is zero since on most systems 0x00000000 is an
 * invalid address which is trapped.
 *
 */
#define BMEM_CLEAR_BYTE      0x00
223
/*
 * BMEM_FILENAME_LENGTH (modifiable)
 *
 * The length of the filename to be stored for each block. Only used when
 * BMEM_TRACK_FILE_AND_LINE is defined. Make this a multiple of 4 minus 1,
 * like the default of 127.
 */
#define BMEM_FILENAME_LENGTH 127
232
233/**************************************************************************
234 * There is even less need to modify the macros from here on down.
235 **************************************************************************/
236
/*
 * BMEM_CHECK_ALL_GUARDS (modifiable)
 *
 * Whenever a block is freed or allocated, the list of allocated and freed
 * blocks is walked and the guard values checked to make sure they haven't
 * been touched.
 *
 * If you choose BMEM_CHECK_ALL_GUARDS, then you get BMEM_FREE_CAREFULLY
 * as well.
 *
 * This can be very slow, and is undesirable in production code.
 *
 */
#define BMEM_CHECK_ALL_GUARDS_FASTEST false
#define BMEM_CHECK_ALL_GUARDS_NORMAL  false
#define BMEM_CHECK_ALL_GUARDS_TRACK   false
#define BMEM_CHECK_ALL_GUARDS_SAFE    true
#define BMEM_CHECK_ALL_GUARDS_SAFEST  true

/* Select the value matching the build-time safety level; unknown levels
 * fall back to the safest (most-checked) behavior. */
#if (BMEM_SAFETY_CONFIG==BMEM_CONFIG_FASTEST)
        #define BMEM_CHECK_ALL_GUARDS BMEM_CHECK_ALL_GUARDS_FASTEST
#elif (BMEM_SAFETY_CONFIG==BMEM_CONFIG_NORMAL)
        #define BMEM_CHECK_ALL_GUARDS BMEM_CHECK_ALL_GUARDS_NORMAL
#elif (BMEM_SAFETY_CONFIG==BMEM_CONFIG_TRACK)
        #define BMEM_CHECK_ALL_GUARDS BMEM_CHECK_ALL_GUARDS_TRACK
#elif (BMEM_SAFETY_CONFIG==BMEM_CONFIG_SAFE)
        #define BMEM_CHECK_ALL_GUARDS BMEM_CHECK_ALL_GUARDS_SAFE
#elif (BMEM_SAFETY_CONFIG==BMEM_CONFIG_SAFEST)
        #define BMEM_CHECK_ALL_GUARDS BMEM_CHECK_ALL_GUARDS_SAFEST
#else
        #define BMEM_CHECK_ALL_GUARDS BMEM_CHECK_ALL_GUARDS_SAFEST
#endif
269
/*
 * BMEM_FREE_CAREFULLY (modifiable)
 *
 * When a block is freed, the heap manager can check to see if it is in the
 * list of allocated blocks before freeing it. This is slow (as it
 * iterates over a linked list to find the block).
 *
 * Debug environments may want this check, production ones likely don't
 * want the overhead.
 *
 * If you want careful frees, define BMEM_FREE_CAREFULLY.
 * If you choose BMEM_CHECK_ALL_GUARDS, then you get BMEM_FREE_CAREFULLY
 * as well.
 *
 */
#define BMEM_FREE_CAREFULLY_FASTEST (false || BMEM_CHECK_ALL_GUARDS_FASTEST)
#define BMEM_FREE_CAREFULLY_NORMAL  (false || BMEM_CHECK_ALL_GUARDS_NORMAL)
#define BMEM_FREE_CAREFULLY_TRACK   (false || BMEM_CHECK_ALL_GUARDS_TRACK)
#define BMEM_FREE_CAREFULLY_SAFE    (false || BMEM_CHECK_ALL_GUARDS_SAFE)
#define BMEM_FREE_CAREFULLY_SAFEST  (true  || BMEM_CHECK_ALL_GUARDS_SAFEST)

/* Select the value matching the build-time safety level; unknown levels
 * fall back to the safest behavior. */
#if (BMEM_SAFETY_CONFIG==BMEM_CONFIG_FASTEST)
        #define BMEM_FREE_CAREFULLY BMEM_FREE_CAREFULLY_FASTEST
#elif (BMEM_SAFETY_CONFIG==BMEM_CONFIG_NORMAL)
        #define BMEM_FREE_CAREFULLY BMEM_FREE_CAREFULLY_NORMAL
#elif (BMEM_SAFETY_CONFIG==BMEM_CONFIG_TRACK)
        #define BMEM_FREE_CAREFULLY BMEM_FREE_CAREFULLY_TRACK
#elif (BMEM_SAFETY_CONFIG==BMEM_CONFIG_SAFE)
        #define BMEM_FREE_CAREFULLY BMEM_FREE_CAREFULLY_SAFE
#elif (BMEM_SAFETY_CONFIG==BMEM_CONFIG_SAFEST)
        #define BMEM_FREE_CAREFULLY BMEM_FREE_CAREFULLY_SAFEST
#else
        #define BMEM_FREE_CAREFULLY BMEM_FREE_CAREFULLY_SAFEST
#endif
304
/*
 * BMEM_FREE_CLEAR (modifiable)
 *
 * When a block is freed, the heap manager can fill the data space with
 * a byte value (BMEM_CLEAR_BYTE). This is done to guarantee that data
 * isn't used after it has been freed.
 *
 * Debug environments may want this check, production ones likely don't
 * want the overhead.
 *
 * If you want to clear out freed data, define BMEM_FREE_CLEAR.
 */
#define BMEM_FREE_CLEAR_FASTEST false
#define BMEM_FREE_CLEAR_NORMAL  false
#define BMEM_FREE_CLEAR_TRACK   false
#define BMEM_FREE_CLEAR_SAFE    false
#define BMEM_FREE_CLEAR_SAFEST  true

/* Select the value matching the build-time safety level; unknown levels
 * fall back to the safest behavior. */
#if (BMEM_SAFETY_CONFIG==BMEM_CONFIG_FASTEST)
        #define BMEM_FREE_CLEAR BMEM_FREE_CLEAR_FASTEST
#elif (BMEM_SAFETY_CONFIG==BMEM_CONFIG_NORMAL)
        #define BMEM_FREE_CLEAR BMEM_FREE_CLEAR_NORMAL
#elif (BMEM_SAFETY_CONFIG==BMEM_CONFIG_TRACK)
        #define BMEM_FREE_CLEAR BMEM_FREE_CLEAR_TRACK
#elif (BMEM_SAFETY_CONFIG==BMEM_CONFIG_SAFE)
        #define BMEM_FREE_CLEAR BMEM_FREE_CLEAR_SAFE
#elif (BMEM_SAFETY_CONFIG==BMEM_CONFIG_SAFEST)
        #define BMEM_FREE_CLEAR BMEM_FREE_CLEAR_SAFEST
#else
        #define BMEM_FREE_CLEAR BMEM_FREE_CLEAR_SAFEST
#endif
336
337
/*
 * BMEM_GUARD_SIZE (modifiable)
 *
 * At the front and back of all blocks (both free and allocated) are a series
 * of guard bytes. These allow detection of buffer over- and under-runs.
 *
 * If you have a system which has under- or over-runs, making this value
 * large will protect the heap manager's data structures.
 *
 * The number of 4-byte words in these series is set by BMEM_GUARD_SIZE.
 */
#define BMEM_GUARD_UNIT_BYTES 4
#define BMEM_GUARD_SIZE_FASTEST 0
#define BMEM_GUARD_SIZE_NORMAL  1
#define BMEM_GUARD_SIZE_TRACK   1
#define BMEM_GUARD_SIZE_SAFE    2
#define BMEM_GUARD_SIZE_SAFEST  8
#define BMEM_GUARD_SIZE_DEFAULT 8

/* Select the value matching the build-time safety level; unknown levels
 * fall back to the safest (largest guard) behavior. */
#if (BMEM_SAFETY_CONFIG==BMEM_CONFIG_FASTEST)
        #define BMEM_GUARD_SIZE        BMEM_GUARD_SIZE_FASTEST
#elif (BMEM_SAFETY_CONFIG==BMEM_CONFIG_NORMAL)
        #define BMEM_GUARD_SIZE        BMEM_GUARD_SIZE_NORMAL
#elif (BMEM_SAFETY_CONFIG==BMEM_CONFIG_TRACK)
        #define BMEM_GUARD_SIZE        BMEM_GUARD_SIZE_TRACK
#elif (BMEM_SAFETY_CONFIG==BMEM_CONFIG_SAFE)
        #define BMEM_GUARD_SIZE        BMEM_GUARD_SIZE_SAFE
#elif (BMEM_SAFETY_CONFIG==BMEM_CONFIG_SAFEST)
        #define BMEM_GUARD_SIZE        BMEM_GUARD_SIZE_SAFEST
#else
        #define BMEM_GUARD_SIZE        BMEM_GUARD_SIZE_SAFEST
#endif

/* Guard band size in bytes, taken from the heap's runtime safety config.
 * NOTE: this macro is not hygienic -- it expands an expression that reads
 * a variable named 'pheap' which must be in scope at the point of use. */
#define BMEM_GUARD_SIZE_BYTES (BMEM_GUARD_UNIT_BYTES * pheap->pSafetyConfigInfo->iGuardSize)
372
/*
 * BMEM_CHECK_DISORDER (modifiable)
 *
 * This turns on a consistency check during frees which checks to see if
 * list of blocks are ordered by size.
 *
 * This is more of a check of the heap manager itself than of programs
 * which are running with this manager, so it's off for everything but
 * the safest configurations.
 */
#define BMEM_CHECK_DISORDER_FASTEST false
#define BMEM_CHECK_DISORDER_NORMAL  false
#define BMEM_CHECK_DISORDER_TRACK   false
#define BMEM_CHECK_DISORDER_SAFE    false
#define BMEM_CHECK_DISORDER_SAFEST  true

/* Select the value matching the build-time safety level; unknown levels
 * fall back to the safest behavior. */
#if (BMEM_SAFETY_CONFIG==BMEM_CONFIG_FASTEST)
        #define BMEM_CHECK_DISORDER BMEM_CHECK_DISORDER_FASTEST
#elif (BMEM_SAFETY_CONFIG==BMEM_CONFIG_NORMAL)
        #define BMEM_CHECK_DISORDER BMEM_CHECK_DISORDER_NORMAL
#elif (BMEM_SAFETY_CONFIG==BMEM_CONFIG_TRACK)
        #define BMEM_CHECK_DISORDER BMEM_CHECK_DISORDER_TRACK
#elif (BMEM_SAFETY_CONFIG==BMEM_CONFIG_SAFE)
        #define BMEM_CHECK_DISORDER BMEM_CHECK_DISORDER_SAFE
#elif (BMEM_SAFETY_CONFIG==BMEM_CONFIG_SAFEST)
        #define BMEM_CHECK_DISORDER BMEM_CHECK_DISORDER_SAFEST
#else
        #define BMEM_CHECK_DISORDER BMEM_CHECK_DISORDER_SAFEST
#endif
402
/*
 * BMEM_CHECK_GUARD_ON_FREE (modifiable)
 *
 * When a block is freed, the guard values are checked to make sure they
 * haven't been touched.
 *
 */
#define BMEM_CHECK_GUARD_ON_FREE_FASTEST false
#define BMEM_CHECK_GUARD_ON_FREE_NORMAL  true
#define BMEM_CHECK_GUARD_ON_FREE_TRACK   true
#define BMEM_CHECK_GUARD_ON_FREE_SAFE    true
#define BMEM_CHECK_GUARD_ON_FREE_SAFEST  true

/* Select the value matching the build-time safety level; unknown levels
 * fall back to the safest behavior. */
#if (BMEM_SAFETY_CONFIG==BMEM_CONFIG_FASTEST)
        #define BMEM_CHECK_GUARD_ON_FREE BMEM_CHECK_GUARD_ON_FREE_FASTEST
#elif (BMEM_SAFETY_CONFIG==BMEM_CONFIG_NORMAL)
        #define BMEM_CHECK_GUARD_ON_FREE BMEM_CHECK_GUARD_ON_FREE_NORMAL
#elif (BMEM_SAFETY_CONFIG==BMEM_CONFIG_TRACK)
        #define BMEM_CHECK_GUARD_ON_FREE BMEM_CHECK_GUARD_ON_FREE_TRACK
#elif (BMEM_SAFETY_CONFIG==BMEM_CONFIG_SAFE)
        #define BMEM_CHECK_GUARD_ON_FREE BMEM_CHECK_GUARD_ON_FREE_SAFE
#elif (BMEM_SAFETY_CONFIG==BMEM_CONFIG_SAFEST)
        #define BMEM_CHECK_GUARD_ON_FREE BMEM_CHECK_GUARD_ON_FREE_SAFEST
#else
        #define BMEM_CHECK_GUARD_ON_FREE BMEM_CHECK_GUARD_ON_FREE_SAFEST
#endif
429
430
/*
 * BMEM_TRACK_FILE_AND_LINE (modifiable)
 *
 * Whenever a block is allocated, record the file and line number given
 * for later reporting or debugging.
 *
 * This takes quite a bit of extra space which is likely undesirable in
 * production code.
 *
 */
#define BMEM_TRACK_FILE_AND_LINE_FASTEST false
#define BMEM_TRACK_FILE_AND_LINE_NORMAL  false
#define BMEM_TRACK_FILE_AND_LINE_TRACK   true
#define BMEM_TRACK_FILE_AND_LINE_SAFE    true
#define BMEM_TRACK_FILE_AND_LINE_SAFEST  true

/* Select the value matching the build-time safety level; unknown levels
 * fall back to the safest behavior. */
#if (BMEM_SAFETY_CONFIG==BMEM_CONFIG_FASTEST)
        #define BMEM_TRACK_FILE_AND_LINE BMEM_TRACK_FILE_AND_LINE_FASTEST
#elif (BMEM_SAFETY_CONFIG==BMEM_CONFIG_NORMAL)
        #define BMEM_TRACK_FILE_AND_LINE BMEM_TRACK_FILE_AND_LINE_NORMAL
#elif (BMEM_SAFETY_CONFIG==BMEM_CONFIG_TRACK)
        #define BMEM_TRACK_FILE_AND_LINE BMEM_TRACK_FILE_AND_LINE_TRACK
#elif (BMEM_SAFETY_CONFIG==BMEM_CONFIG_SAFE)
        #define BMEM_TRACK_FILE_AND_LINE BMEM_TRACK_FILE_AND_LINE_SAFE
#elif (BMEM_SAFETY_CONFIG==BMEM_CONFIG_SAFEST)
        #define BMEM_TRACK_FILE_AND_LINE BMEM_TRACK_FILE_AND_LINE_SAFEST
#else
        #define BMEM_TRACK_FILE_AND_LINE BMEM_TRACK_FILE_AND_LINE_SAFEST
#endif
460
461
462/**************************************************************************
463 **************************************************************************
464 * Modifying anything from here down can cause blindness and sterility
465 **************************************************************************
466 **************************************************************************/
467
/*
 * BMEM_TRACK_ALLOCATIONS (do not modify, derived)
 *
 * For debugging purposes, you may wish to keep a list of all of the
 * allocated blocks in the system. This uses space in the block header and
 * is slower since list management must be done during allocation and
 * deallocation.
 *
 * This is required for BMEM_FREE_CAREFULLY and BMEM_CHECK_ALL_GUARDS to work.
 * It's also needed to track allocation files and lines.
 *
 * Each expansion is wrapped in an outer set of parentheses so the macro
 * behaves as a single expression when negated or combined with other
 * operators in #if expressions (previously '!BMEM_TRACK_ALLOCATIONS'
 * would have negated only the first operand of the OR chain).
 */
#define BMEM_TRACK_ALLOCATIONS_FASTEST ((BMEM_FREE_CAREFULLY_FASTEST) || (BMEM_CHECK_ALL_GUARDS_FASTEST) || (BMEM_TRACK_FILE_AND_LINE_FASTEST))
#define BMEM_TRACK_ALLOCATIONS_NORMAL  ((BMEM_FREE_CAREFULLY_NORMAL)  || (BMEM_CHECK_ALL_GUARDS_NORMAL)  || (BMEM_TRACK_FILE_AND_LINE_NORMAL))
#define BMEM_TRACK_ALLOCATIONS_TRACK   ((BMEM_FREE_CAREFULLY_TRACK)   || (BMEM_CHECK_ALL_GUARDS_TRACK)   || (BMEM_TRACK_FILE_AND_LINE_TRACK))
#define BMEM_TRACK_ALLOCATIONS_SAFE    ((BMEM_FREE_CAREFULLY_SAFE)    || (BMEM_CHECK_ALL_GUARDS_SAFE)    || (BMEM_TRACK_FILE_AND_LINE_SAFE))
#define BMEM_TRACK_ALLOCATIONS_SAFEST  ((BMEM_FREE_CAREFULLY_SAFEST)  || (BMEM_CHECK_ALL_GUARDS_SAFEST)  || (BMEM_TRACK_FILE_AND_LINE_SAFEST))

#define BMEM_TRACK_ALLOCATIONS ((BMEM_FREE_CAREFULLY) || (BMEM_CHECK_ALL_GUARDS) || (BMEM_TRACK_FILE_AND_LINE))
487
/*
 * BMEM_FRONT_BOOKKEEPING_SIZE_LOCAL/SYSTEM (do not modify, derived)
 *
 * Handy macro which defines how much space is required at the front in
 * order to hold bookkeeping data and the guard.
 *
 * With local bookkeeping:
 *    [align scrap][block info [guard]][...data...][guard][back scrap]
 *                 ^------------------^
 *                 BMEM_FRONT_BOOKKEEPING_SIZE_LOCAL
 *
 * With system bookkeeping:
 *    [align scrap][guard][...data...][guard][back scrap]
 *                 ^-----^
 *                 BMEM_FRONT_BOOKKEEPING_SIZE_SYSTEM
 *
 * Note: these expand BMEM_GUARD_SIZE_BYTES, which reads the runtime safety
 * config through a variable named 'pheap' at the point of use.
 */
#define BMEM_FRONT_BOOKKEEPING_SIZE_LOCAL (sizeof(BMEM_P_BlockInfo)+BMEM_GUARD_SIZE_BYTES)
#define BMEM_FRONT_BOOKKEEPING_SIZE_SYSTEM (BMEM_GUARD_SIZE_BYTES)

/*
 * BMEM_BACK_BOOKKEEPING_SIZE (do not modify, derived)
 *
 * Handy macro which defines how much space is required at the back for the
 * guard.
 *
 * [align scrap][block info [guard]][...data...][guard][back scrap]
 *                                              ^-----^
 *                                              BMEM_BACK_BOOKKEEPING_SIZE
 */
#define BMEM_BACK_BOOKKEEPING_SIZE (BMEM_GUARD_SIZE_BYTES)

/*
 * BMEM_BOOKKEEPING_SIZE_LOCAL/SYSTEM (do not modify, derived)
 *
 * The number of bytes needed just to track the block. No actual blocks can
 * be smaller than this size. It is the overhead for each block. A zero size
 * allocation will create a block of this size.
 */
#define BMEM_BOOKKEEPING_SIZE_LOCAL (BMEM_FRONT_BOOKKEEPING_SIZE_LOCAL+BMEM_BACK_BOOKKEEPING_SIZE)
#define BMEM_BOOKKEEPING_SIZE_SYSTEM (BMEM_FRONT_BOOKKEEPING_SIZE_SYSTEM+BMEM_BACK_BOOKKEEPING_SIZE)
528
/*
 * BMEM_MIN_BLOCK_SIZE_LOCAL/SYSTEM (do not modify, derived)
 *
 * When splitting up a free block for allocation, any left-overs smaller than
 * BMEM_MIN_BLOCK_SIZE_LOCAL/SYSTEM will become part of the allocated block.
 * This may seem wasteful, but it reduces overall heap fragmentation.
 *
 * If this value is set to BMEM_BOOKKEEPING_SIZE_LOCAL/SYSTEM, then all
 * left-overs large enough to be a free or allocated block will be split up as
 * individual blocks. It cannot be set below BMEM_BACK_BOOKKEEPING_SIZE.
 *
 * The default value is big enough for bookkeeping and a 128 byte
 * (BMEM_MIN_ALLOCATION_SIZE) allocation.
 */
#define BMEM_MIN_BLOCK_SIZE_LOCAL  (BMEM_BOOKKEEPING_SIZE_LOCAL + BMEM_MIN_ALLOCATION_SIZE)
#define BMEM_MIN_BLOCK_SIZE_SYSTEM  (BMEM_BOOKKEEPING_SIZE_SYSTEM + BMEM_MIN_ALLOCATION_SIZE)
545
/*
 * BMEM_HEAP_ALIGNMENT (do not modify)
 *
 * The heap requires a certain alignment since its data structures
 * are contained in the heap itself. The largest item in the BMEM_Heap and
 * BMEM_BlockInfo structures are pointers and uint32_ts, so a 4-byte alignment
 * is required.
 *
 * The value is the number of low-order address bits forced to zero.
 * The default value is 6, which means the last six bits of the address will
 * be zero, which will yield 64-byte alignment.
 *
 * For older chips, the default value is 2, which yields a
 * 4-byte alignment.
 */
#if ((BCHP_CHIP==7038) || (BCHP_CHIP==3560) || (BCHP_CHIP==7401) || (BCHP_CHIP==7403) || \
         (BCHP_CHIP==7118) || (BCHP_CHIP==3563) || (BCHP_CHIP==3543) || ((BCHP_CHIP==7400) && (BCHP_VER < BCHP_VER_B0)))
#define BMEM_HEAP_ALIGNMENT         2
#elif (BCHP_CHIP==7325)
#define BMEM_HEAP_ALIGNMENT         7   /* The 7325 requires a minimum 128 byte alignment due to the L2 cache size */
#elif (BCHP_CHIP==7420 && BCHP_VER==BCHP_VER_A0)
#define BMEM_HEAP_ALIGNMENT         5
#elif (BCHP_CHIP==7420 && BCHP_VER>=BCHP_VER_A1)
#define BMEM_HEAP_ALIGNMENT         7
#elif (BCHP_CHIP==7422) || (BCHP_CHIP==7425)
#define BMEM_HEAP_ALIGNMENT         7
#else
#define BMEM_HEAP_ALIGNMENT         6   /* All other new chips by default will use a 64 byte alignment */
#endif

/* Mask of the low-order bits that must be zero on an aligned address. */
#define BMEM_HEAP_ALIGNMENT_MASK    ((1L<<BMEM_HEAP_ALIGNMENT)-1)
576
/***************************************************************************
 * BMEM_P_BlockInfo - Holds the bookkeeping for a block of memory
 *
 * Structure of a block:
 *
 * [align scrap][block info [guard]][...data...][guard][back scrap]
 * ^--------------------------------------------------------------^
 *                              ulSize
 * ^-----------^                                       ^----------^
 * ulFrontScrap                                        ulBackScrap
 *
 */

typedef struct BMEM_P_BlockInfo BMEM_P_BlockInfo;
struct BMEM_P_BlockInfo
{
        size_t         ulSize;       /* Total size including guards and scrap */
        size_t         ulFrontScrap; /* Space at front left over from alignment */
        size_t         ulBackScrap;  /* Space at back left over to reduce fragmentation */
/* NOTE(review): BMEM_TRACK_FILE_AND_LINE is always #defined earlier in this
 * header (to true or false), so this #ifdef is always taken and the two
 * fields below are always present -- confirm whether
 * '#if BMEM_TRACK_FILE_AND_LINE' was intended, or whether the fields are
 * deliberately unconditional to support the runtime safety config. */
#ifdef BMEM_TRACK_FILE_AND_LINE
        uint32_t       ulLine;       /* Source line of the allocation site */
        const char    *pchFilename;  /* Source file of the allocation site (pointer, not a copy) */
#endif
        struct BMEM_P_BlockInfo *pnext; /* Points to next allocated or free block */
        struct BMEM_P_BlockInfo *pprev; /* Points to previous allocated or free block */
};
603
604
/* One runtime safety-configuration entry. A heap keeps a const pointer to
 * one of these so safety behavior can be selected per heap at runtime
 * rather than only at compile time. */
typedef struct BMEM_P_SafetyConfigInfo
{
        uint32_t eSafetyConfig;      /* Which safety level this entry describes */
        bool     bTrackAllocations;  /* Keep a list of allocated blocks */
        bool     bFreeClear;         /* Fill freed data with BMEM_CLEAR_BYTE */
        bool     bFreeCarefully;     /* Verify a block is allocated before freeing it */
        uint32_t iGuardSize;         /* Guard size, in BMEM_GUARD_UNIT_BYTES units */
        bool     bCheckDisorder;     /* Verify the free list stays ordered by size */
        bool     bCheckGuardOnFree;  /* Check a block's guards when it is freed */
        bool     bCheckAllGuards;    /* Walk all blocks checking guards on alloc/free */
        bool     bTrackFileAndLine;  /* Record allocation file/line in block info */
} BMEM_P_SafetyConfigInfo;
617
BDBG_OBJECT_ID_DECLARE(BMEM_Heap);

/***************************************************************************
 * BMEM_Heap
 *
 * Structure of a local heap:
 *
 * [BMEM_Heap][scrap][block][free block][block]...[block][scrap]
 *           pStart                                    pEnd
 *                         pFreeTop
 *
 * pAllocTop may point to any allocated block. It isn't ordered. pFreeTop
 * will always point to the first free block.
 *
 * The two scraps might be created depending on the alignment of the chunk
 * of memory given to the manager and the alignment required.
 *
 * The structure of a system heap is the same except the heap information
 * (BMEM_Heap) isn't kept at the front of the heap but in system memory
 * instead.
 *
 */
typedef struct BMEM_P_Heap
{
    BDBG_OBJECT(BMEM_Heap)
        BLST_S_ENTRY(BMEM_P_Heap) link;  /* Linkage in the module's heap list */
        BMEM_ModuleHandle hMem;          /* Owning BMEM module instance */

        BMEM_P_BlockInfo *pFreeTop;      /* Pointer to the first free block */
        BMEM_P_BlockInfo *pAllocTop;     /* Pointer to the first allocated block */

        uint32_t ulAlignMask;            /* Mask to use to create aligned blocks */
        unsigned int  uiAlignment;       /* Heap alignment (power of 2). */

        uint8_t *pStart;                 /* Pointer to first usable available byte in heap */
        uint8_t *pEnd;                   /* Pointer to last usable available byte in heap */

        uint32_t ulNumErrors;            /* Counts number of errors detected */

#if (BMEM_REENTRANT_CONFIG == BMEM_REENTRANT)
        BKNI_MutexHandle pMutex;         /* Semaphore to lock this heap if BMEM_REENTRANT */
#endif

        void *pvData;                    /* Pointer to private data allocated by heap manager */

        void *pvHeap;                    /* Original starting address passed during heap create */
        void *pvCache;                   /* Cached start address of the heap. */
        uint32_t ulOffset;               /* Device offset passed during heap create */
        size_t zSize;                    /* Size of the heap in bytes. */
        uint32_t ulNumAllocated;         /* Number of allocated blocks */
        size_t ulTotalAllocated;         /* Total allocated memory */
        size_t ulHighWatermark;          /* High memory watermark (max memory used at once) */

        const BMEM_P_SafetyConfigInfo *pSafetyConfigInfo; /* Active runtime safety settings */
        uint32_t ulGuardSizeBytes;       /* Guard band size */

        uint32_t eBookKeeping;           /* Bookkeeping scheme in use (see local/system heap managers) */

        bool bCacheLocked;               /* Locks cache mapping from being changed once heap is allocated from */

        BMEM_FlushCallback pfFlushCb;    /* Callback used to flush cache at task time */
        BMEM_FlushCallback pfFlushCb_isr;/* Callback used to flush cache at isr time */

        BMEM_MonitorInterface *monitor;  /* Installed memory monitor */

        /* Dispatch table selecting the local or system bookkeeping
         * implementation for this heap. */
        uint32_t (*pGetAddressFunc) (BMEM_Handle, BMEM_P_BlockInfo *);
        BMEM_P_BlockInfo* (*pGetBlockInfoFunc) (BMEM_Handle, uint32_t);
        void (*pDropBlockInfoFunc) (BMEM_Handle, BMEM_P_BlockInfo *);
        void (*pDestroyHeapFunc) (BMEM_Handle);
}
BMEM_P_Heap;
689
/*
Summary:
This structure defines the head element for creating
a singly-linked list of heaps (the body is generated by BLST_S_HEAD).
*/
typedef struct BMEM_HeapList BMEM_HeapList;
BLST_S_HEAD(BMEM_HeapList, BMEM_P_Heap);
697
BDBG_OBJECT_ID_DECLARE(BMEM);

/***************************************************************************
 * BMEM_P_Mem
 *
 * Module-level state: the list of heaps belonging to this module instance.
 */
typedef struct BMEM_P_Mem
{
    BDBG_OBJECT(BMEM)
        BMEM_HeapList HeapList;          /* All heaps created through this module */
} BMEM_P_Mem;
709
710
/* Round a pointer or offset up to the next heap alignment boundary.
 * NOTE(review): the value is cast through uint32_t rather than uintptr_t
 * (uintptr_t was deliberately replaced in Hydra_Software_Devel/25 for
 * chips whose toolchains lack it); this would truncate pointers on a
 * 64-bit build -- confirm this header only targets 32-bit platforms. */
#define BMEM_HEAP_ALIGN(pv) ((((uint32_t)(pv))+BMEM_HEAP_ALIGNMENT_MASK) & ~BMEM_HEAP_ALIGNMENT_MASK)

/* Default safety-configuration selector -- presumably indexes/matches the
 * first entry of BMEM_P_SafetyConfigTbl; verify against bmem.c. */
#define BMEM_CONFIG_DEFAULT 0

/* Safety configuration table and its entry count (defined in bmem.c). */
extern const BMEM_P_SafetyConfigInfo BMEM_P_SafetyConfigTbl[];
extern const uint32_t BMEM_P_SafetyTableSize;
717
718
719/*
720 * Functions to fiddle with the guard bytes
721 */
722void BMEM_P_FillGuard(BMEM_Handle pheap, BMEM_P_BlockInfo *pbi);
723BERR_Code BMEM_P_CheckGuard(BMEM_Handle pheap, BMEM_P_BlockInfo *pbi);
724
725#define BMEM_P_GetAddress(pheap, pbi) ((*(pheap->pGetAddressFunc))(pheap, pbi))
726#define BMEM_P_GetBlockInfo(pheap, addr) ((*(pheap->pGetBlockInfoFunc))(pheap, addr))
727#define BMEM_P_DropBlockInfo(pheap, pbi) ((*(pheap->pDropBlockInfoFunc))(pheap, pbi))
728#define BMEM_P_Heap_CheckAddress(pheap, addr) ((addr >= pheap->pStart) && (addr <= pheap->pEnd))
729#define BMEM_P_Heap_CheckCachedAddress(pheap, addr) (pheap->pvCache && (addr >= (uint8_t*)pheap->pvCache) && (addr < (uint8_t*)pheap->pvCache+pheap->zSize))
730
/*
 * These functions are provided by the local heap manager (block bookkeeping
 * kept inside the managed memory itself).
 */
BERR_Code BMEM_P_LocalCreateHeap(BMEM_Handle *ppHeap, void *pvHeap, uint32_t ulOffset, size_t zSize, BMEM_Heap_Settings *pHeapSettings);
uint32_t BMEM_P_LocalGetAddress(BMEM_Handle pheap, BMEM_P_BlockInfo *pbi);
BMEM_P_BlockInfo *BMEM_P_LocalGetBlockInfo(BMEM_Handle pheap, uint32_t addr);
void BMEM_P_LocalDropBlockInfo(BMEM_Handle pheap, BMEM_P_BlockInfo *pbi);
void BMEM_P_LocalDestroyHeap(BMEM_Handle Heap);

/*
 * These functions are provided by the system heap manager (block bookkeeping
 * kept in system memory, outside the managed heap).
 */
BERR_Code BMEM_P_SystemCreateHeap(BMEM_Handle *ppHeap, void *pvHeap, uint32_t ulOffset, size_t zSize, BMEM_Heap_Settings *pHeapSettings);
uint32_t BMEM_P_SystemGetAddress(BMEM_Handle pheap, BMEM_P_BlockInfo *pbi);
BMEM_P_BlockInfo *BMEM_P_SystemGetBlockInfo(BMEM_Handle pheap, uint32_t addr);
void BMEM_P_SystemDropBlockInfo(BMEM_Handle pheap, BMEM_P_BlockInfo *pbi);
void BMEM_P_SystemDestroyHeap(BMEM_Handle Heap);
748
#if (BMEM_REENTRANT_CONFIG == BMEM_REENTRANT)

/* Reentrant build: serialize access to a heap with its per-heap mutex. */
#define BMEM_P_GET_SEMAPHORE(heap)     BKNI_AcquireMutex((heap)->pMutex)
#define BMEM_P_RELEASE_SEMAPHORE(heap) BKNI_ReleaseMutex((heap)->pMutex)

#else /* BMEM_NOT_REENTRANT */

/* Non-reentrant build: locking compiles away. GET still yields
 * BERR_SUCCESS so callers that check the result keep working, and
 * RELEASE expands to ((void)0) rather than nothing so a trailing
 * semicolon always forms a complete statement (avoids empty-body and
 * dangling-else hazards when used as 'if (x) BMEM_P_RELEASE_SEMAPHORE(h);'). */
#define BMEM_P_GET_SEMAPHORE(heap)              (BERR_SUCCESS)
#define BMEM_P_RELEASE_SEMAPHORE(heap)          ((void)0)

#endif /* if (BMEM_REENTRANT_CONFIG == BMEM_REENTRANT) */
760
761#ifdef __cplusplus
762} /* end extern "C" */
763#endif
764
765#endif /* #ifndef BMEM_PRIV_H__ */
766
767/* End of File */
Note: See TracBrowser for help on using the repository browser.