source: svn/trunk/newcon3bcm2_21bu/magnum/basemodules/kni/linuxuser/bkni_track_mallocs.inc

Last change on this file was 2, checked in by phkim, 11 years ago

1.phkim

  1. revision copy newcon3sk r27
  • Property svn:executable set to *
File size: 17.7 KB
Line 
1/***************************************************************************
2 *     Copyright (c) 2003-2011, Broadcom Corporation
3 *     All Rights Reserved
4 *     Confidential Property of Broadcom Corporation
5 *
6 *  THIS SOFTWARE MAY ONLY BE USED SUBJECT TO AN EXECUTED SOFTWARE LICENSE
7 *  AGREEMENT  BETWEEN THE USER AND BROADCOM.  YOU HAVE NO RIGHT TO USE OR
8 *  EXPLOIT THIS MATERIAL EXCEPT SUBJECT TO THE TERMS OF SUCH AN AGREEMENT.
9 *
10 * $brcm_Workfile: bkni_track_mallocs.inc $
11 * $brcm_Revision: Hydra_Software_Devel/10 $
12 * $brcm_Date: 6/6/11 3:27p $
13 *
14 * Module Description:
15 *
16 * Wrapper over host malloc and free routines
17 *
18 * Revision History:
19 * $brcm_Log: /magnum/basemodules/kni/linuxuser/bkni_track_mallocs.inc $ *
20 *
21 * Hydra_Software_Devel/10   6/6/11 3:27p vsilyaev
22 * SW7405-4477: Routed all debug output through buffer and use external
23 * application to extract and print debug output
24 *
25 * Hydra_Software_Devel/9   8/3/10 12:03p erickson
26 * SW7400-2857: refactor BKNI_P_GetTrackAllocEntry into separate read-only
27 * and read-write functions. this limits worst-case stack usage in
28 * BDBG_OBJECT_ASSERT.
29 *
30 * Hydra_Software_Devel/8   2/5/10 3:24p erickson
31 * SW7405-2397: NULL freed pointer
32 *
33 * Hydra_Software_Devel/7   5/15/09 12:15p jtna
34 * PR54398: redefine B_TRACK_ALLOC_LOCK for linuxuser and remove return
35 * value check
36 *
37 * Hydra_Software_Devel/6   4/30/09 4:52p erickson
38 * PR54759: fix memleak on error path
39 *
40 * Hydra_Software_Devel/5   4/21/09 10:49a erickson
41 * PR54398: fix warnings
42 *
43 * Hydra_Software_Devel/4   4/20/09 12:40p jtna
44 * PR53778: update for linuxkernel
45 *
46 * Hydra_Software_Devel/3   4/17/09 6:30p vsilyaev
47 * PR 53778: Added description of expected inputs
48 *
49 * Hydra_Software_Devel/2   4/17/09 6:24p vsilyaev
50 * PR 53778: Filter results printed on the table resizing
51 *
52 * Hydra_Software_Devel/1   4/17/09 5:39p vsilyaev
53 * PR 53778: Wrapper over host malloc and free routines
54 *
55 * $old_brcm_Log: /magnum/basemodules/kni/linuxuser/bkni.c $ *
56 *
57 * Hydra_Software_Devel/66   4/17/09 5:07p vsilyaev
58 * PR 53778: Try to detect double-free blocks and print location where
59 * they were allocated and freed
60 *
61 * Hydra_Software_Devel/65   4/17/09 4:08p vsilyaev
62 * PR 53778: Fixed detection of double-free'd blocks
63 *
64 ***************************************************************************/
65
66/*
   This "macro" expects the following defines as its input:
    B_TRACK_ALLOC_LOCK() - code to acquire the lock
    B_TRACK_ALLOC_UNLOCK() - code to release the lock
    B_TRACK_ALLOC_ALLOC(size)  - allocates a block of system memory
    B_TRACK_ALLOC_FREE(ptr)  - frees an allocated block
    B_TRACK_ALLOC_OS  - string that reflects the name of the OS; it is only used in the debug output
73*/
74
75#if BKNI_TRACK_MALLOCS
76#define  BKNI_GARBLE_MALLOC 1
77
/* One slot of the open-addressed allocation-tracking hash table.
 * A slot with mem==NULL is free. */
struct BKNI_TrackAllocEntry {
    const void *mem;  /* address returned by the allocator (hash key); NULL = free slot */
    size_t size;      /* requested allocation size in bytes */
    const char *file; /* source file of the BKNI_Malloc call (may be NULL for untagged calls) */
    unsigned line;    /* source line of the BKNI_Malloc call */
};
84
/* Per-call-site accumulator used to build the "top users" report that is
 * printed while the tracking table is resized. */
struct BKNI_P_AllocUser {
    unsigned count;   /* number of live blocks allocated at file:line; 0 = empty slot */
    unsigned size;    /* total bytes currently allocated from this call site */
    const char *file; /* allocation-site source file */
    unsigned line;    /* allocation-site source line */
};
91
/* Global allocation-tracking state; all access is expected to happen under
 * B_TRACK_ALLOC_LOCK()/B_TRACK_ALLOC_UNLOCK(). */
static struct {
    size_t allocated, freed, peak;     /* cumulative byte counters and high-water mark of (allocated-freed) */
    unsigned allocated_cnt, freed_cnt; /* number of successful alloc / free calls */
    unsigned table_size;               /* capacity (slots) of 'table' */
    struct BKNI_TrackAllocEntry *table; /* open-addressed hash table of live allocations */
    struct {
        struct BKNI_TrackAllocEntry alloc; /* snapshot of the entry at the time it was freed */
        struct {
            const char *file; /* call site of the free */
            unsigned line;
        } free;
    } history[256]; /* FIFO of last freed objects */
    struct BKNI_P_AllocUser alloc_users[16]; /* temporary array used to account frequent users in the event of table resizing */
} g_alloc_state;
106
/* Fill the buffer at 'ptr' with the 32-bit pattern 'word'.  Only whole
 * 32-bit words are written: up to 3 trailing bytes (when 'size' is not a
 * multiple of 4) are left untouched.  A NULL 'ptr' is a no-op. */
static void
b_memset32(void *ptr, size_t size, uint32_t word)
{
   /* size_t (not unsigned) so the index cannot wrap before 'size' does
    * on platforms where size_t is wider than unsigned */
   size_t i;

   if(ptr) {
       for(i=0; i+3 < size; i+=4) {
          *(uint32_t*)(((uint8_t *)ptr)+i)=word;
       }
   }
   return;
}
119
/* Trim a long path for debug output, keeping at most the last four
 * directory separators' worth of components.  NULL maps to "unknown";
 * both '/' and '\\' are treated as separators. */
static const char *
b_shorten_filename(const char *pFileName)
{
    const char *scan;
    unsigned separators;

    if(pFileName==NULL) {
        return "unknown";
    }
    /* locate the terminating NUL */
    for(scan=pFileName; *scan != '\0'; scan++) { }

    /* walk backwards; once a 5th separator is seen, return the suffix
       that starts just past it */
    for(separators=0; scan!=pFileName; scan--) {
        if(*scan=='/' || *scan=='\\') {
            separators++;
            if(separators>4) {
                return scan+1;
            }
        }
    }
    return pFileName;
}
141
142
/* Derive a hash-table key from a pointer value.  Mixes the address bits so
 * that nearby addresses land in distant buckets; the result is truncated to
 * 'unsigned' on return. */
static unsigned
b_alloc_hashkey(const void *mem)
{
    unsigned long key = (unsigned long)mem;

    key = (key ^ 61) ^ (key >> 16);
    key += key << 3;
    key ^= key >> 4;
    key *= 0x66635119; /* 1717784857  some prime number */
    key ^= key >> 15;
    return key;
}
155
156static struct BKNI_TrackAllocEntry *
157BKNI_P_GetTrackAllocEntry_one(const void *mem, const void *match, struct BKNI_TrackAllocEntry *entries, unsigned table_size)
158{
159    unsigned i;
160    unsigned index;
161    for(index=i=b_alloc_hashkey(mem)%table_size;i<2*table_size;i++,index++) {
162        if(index>=table_size) {
163            index = 0;
164        }
165        if(entries[index].mem==match) {
166#if 0
167            /* some profiling code to measure effectivness of the hash lookup */
168            static unsigned max_count=0;
169            static unsigned total_count=0;
170            unsigned count = i-(b_alloc_hashkey(mem)%table_size);
171            total_count+=count+1;
172            if(count>max_count) {
173                unsigned avg = (100*total_count)/(g_alloc_state.allocated_cnt+g_alloc_state.freed_cnt+1);
174                max_count=count;
175                BDBG_P_PrintString("BKNI_GetTrackAllocEntry: scan_count:%u(%u.%02u avg) addr:%#lx key:%u elements:%u(%u)\n", count,  avg/100, avg%100, (unsigned long)mem, (b_alloc_hashkey(mem)%table_size), g_alloc_state.allocated_cnt-g_alloc_state.freed_cnt, table_size);
176            }
177#endif
178            return  entries+index;
179        }
180    }
181    return NULL;
182}
183
184static struct BKNI_TrackAllocEntry *
185BKNI_P_GetTrackAllocEntry_resize(const struct BKNI_TrackAllocEntry *entries, unsigned table_size, unsigned new_table_size)
186{
187    unsigned i;
188    struct BKNI_TrackAllocEntry *new_entries;
189
190    new_entries = B_TRACK_ALLOC_ALLOC(new_table_size*sizeof(*new_entries));
191    if(new_entries==NULL) { goto error;}
192
193    for(i=0;i<new_table_size;i++) {
194        new_entries[i].mem = NULL;
195        new_entries[i].size = 0;
196        new_entries[i].file = NULL;
197        new_entries[i].line = 0;
198    }
199    if(entries) {
200        struct BKNI_P_AllocUser *alloc_users = g_alloc_state.alloc_users;
201        const unsigned alloc_users_size = sizeof(g_alloc_state.alloc_users)/sizeof(g_alloc_state.alloc_users[0]);
202
203        BDBG_P_PrintString("BKNI_P_GetTrackAllocEntry_resize: resizing from %u->%u\n", table_size, new_table_size);
204        b_memset32(alloc_users, sizeof(*alloc_users)*alloc_users_size, 0);
205        for(i=0;i<table_size;i++) {
206            if(entries[i].mem) {
207                struct BKNI_TrackAllocEntry *entry = BKNI_P_GetTrackAllocEntry_one(entries[i].mem, NULL, new_entries, new_table_size);
208                unsigned j;
209                if(!entry) {
210                    goto error;
211                }
212                *entry = entries[i];
213                for(j=0;j<alloc_users_size;j++) {
214                    if(alloc_users[j].count==0) {
215                        /* found empty slot, just use it */
216                        alloc_users[j].count = 1;
217                        alloc_users[j].size = entry->size;
218                        alloc_users[j].file = entry->file;
219                        alloc_users[j].line = entry->line;
220                        break;
221                    } else if(alloc_users[j].file == entry->file && alloc_users[j].line == entry->line) {
222                        /* found existing entry, update it and move it up to keep array sorted */
223                        alloc_users[j].count++;
224                        alloc_users[j].size += entry->size;
225                        for(;j>0;j--) {
226                            struct BKNI_P_AllocUser tmp;
227                            if(alloc_users[j].count <= alloc_users[j-1].count) {
228                                break;
229                            }
230                            tmp = alloc_users[j];
231                            alloc_users[j] = alloc_users[j-1];
232                            alloc_users[j-1] = tmp;
233                        }
234                        break;
235                    }
236                }
237                if(j==alloc_users_size) { /* no empty slots */
238                    j=alloc_users_size/2; /* wipe out half of old entries */
239                    alloc_users[j].count = 1;
240                    alloc_users[j].size = entry->size;
241                    alloc_users[j].file = entry->file;
242                    alloc_users[j].line = entry->line;
243                    for(j=j+1;j<alloc_users_size;j++) {
244                        alloc_users[j].count = 0;
245                        alloc_users[j].size = 0;
246                        alloc_users[j].file = 0;
247                        alloc_users[j].line = 0;
248                    }
249                }
250            }
251        }
252        BDBG_P_PrintString("BKNI_Malloc(%s) top users:\n%10s, %7s, filename:line\n", B_TRACK_ALLOC_OS,  "blocks", "bytes");
253        for(i=0;i<alloc_users_size;i++) {
254            if(alloc_users[i].count==0) {
255                break;
256            }
257            if(alloc_users[i].count>=table_size/16) {
258                BDBG_P_PrintString("%#10u, %7u, %s:%u\n", alloc_users[i].count, alloc_users[i].size,b_shorten_filename(alloc_users[i].file), alloc_users[i].line);
259            }
260        }
261    }
262    return new_entries;
263error:
264    if (new_entries) {
265        B_TRACK_ALLOC_FREE(new_entries);
266    }
267    BDBG_P_PrintString("BKNI_P_GetTrackAllocEntry_resize: can't reallocate alloc table %u->%u\n", table_size, new_table_size);
268    return NULL;
269}
270
271/* this performs a lookup for the pointer. if it already exists, it is reused. otherwise, it is created.
272if the existing table is filled, a larger table is created. */
273static struct BKNI_TrackAllocEntry *
274BKNI_P_CreateTrackAllocEntry(const void *mem)
275{
276    struct BKNI_TrackAllocEntry *entries = g_alloc_state.table;
277    struct BKNI_TrackAllocEntry *entry=NULL;
278    if(entries) {
279        unsigned table_size =  g_alloc_state.table_size;
280        entry = BKNI_P_GetTrackAllocEntry_one(mem, NULL, entries, table_size);
281        if(!entry) { /* if were looking for the empty entry */
282            unsigned new_table_size =  2*table_size;
283            struct BKNI_TrackAllocEntry *new_entries = BKNI_P_GetTrackAllocEntry_resize(entries, table_size, new_table_size);
284            B_TRACK_ALLOC_FREE(entries);
285            entries = new_entries;
286            g_alloc_state.table = new_entries;
287            if(new_entries) {
288                g_alloc_state.table_size = new_table_size;
289                entry = BKNI_P_GetTrackAllocEntry_one(mem, NULL, entries, new_table_size);
290            } else {
291                g_alloc_state.table_size = 0;
292            }
293        }
294    }
295    return entry;
296}
297
298/* this is a read-only lookup. if the entry does not exist, it will return NULL. */
299static struct BKNI_TrackAllocEntry *
300BKNI_P_GetTrackAllocEntry(const void *mem)
301{
302    struct BKNI_TrackAllocEntry *entries = g_alloc_state.table;
303    struct BKNI_TrackAllocEntry *entry = NULL;
304    if(entries) {
305        unsigned table_size = g_alloc_state.table_size;
306        entry = BKNI_P_GetTrackAllocEntry_one(mem, mem, entries, table_size);
307    }
308    return entry;
309}
310
311
312void *
313BKNI_Malloc_tagged(size_t size, const char *file, unsigned line)
314{
315    void *mem;
316    ASSERT_NOT_CRITICAL();
317
318#ifdef __KERNEL__
319#ifdef BKNI_METRICS_ENABLED
320    g_metrics.totalMemoryAllocated += size;
321    g_metrics.totalMallocs++;
322#endif
323#endif
324    mem = B_TRACK_ALLOC_ALLOC(size);
325
326    if(mem) {
327        struct BKNI_TrackAllocEntry *entry;
328        size_t used;
329#if BKNI_GARBLE_MALLOC
330        b_memset32(mem, size, 0xDEADDA7A);
331#endif
332        B_TRACK_ALLOC_LOCK();
333        entry = BKNI_P_CreateTrackAllocEntry(mem);
334        if(entry) {
335            entry->mem = mem;
336            entry->size = size;
337            entry->file = file;
338            entry->line = line;
339        }
340        g_alloc_state.allocated += size;
341        g_alloc_state.allocated_cnt ++;
342        used = g_alloc_state.allocated - g_alloc_state.freed;
343        if(used>g_alloc_state.peak) {
344            g_alloc_state.peak = used;
345        }
346        B_TRACK_ALLOC_UNLOCK();
347    } else {
348        /*
349        BKNI_DumpMallocs_Size(0);
350        */
351        BDBG_P_PrintString("BKNI_Alloc(%u): returned NULL at %s:%u\n", size, b_shorten_filename(file), line);
352        BKNI_Fail();
353    }
354    return mem;
355}
356
/* Free a pointer previously returned by BKNI_Malloc_tagged and update the
 * tracking state.  Each successful free is recorded in a history FIFO so
 * that a later double free of the same address can be diagnosed with both
 * the original allocation site and the first free site.  Frees of NULL or
 * of unknown pointers are reported and routed through BKNI_Fail(). */
void
BKNI_Free_tagged(void *ptr, const char *file, unsigned line)
{
    ASSERT_NOT_CRITICAL();

    if(ptr) {
        struct BKNI_TrackAllocEntry *entry;
        size_t size=0;
        const unsigned history_size = sizeof(g_alloc_state.history)/sizeof(g_alloc_state.history[0]);
        B_TRACK_ALLOC_LOCK();
        entry = BKNI_P_GetTrackAllocEntry(ptr);
        if(entry) {
            /* snapshot the allocation and record who freed it in the FIFO */
            unsigned history_index=g_alloc_state.freed_cnt%history_size;
            g_alloc_state.history[history_index].alloc = *entry;
            g_alloc_state.history[history_index].free.file = file;
            g_alloc_state.history[history_index].free.line = line;
            g_alloc_state.freed += entry->size;
            g_alloc_state.freed_cnt++;
            size = entry->size;
            entry->mem = NULL; /* release the tracking slot */
        } else if(g_alloc_state.table!=NULL) {
            /* unknown pointer while tracking is active: likely a double free.
             * Scan the history FIFO so we can report where this address was
             * previously allocated and freed. */
            unsigned i;
            BDBG_P_PrintString("BKNI_Free of unknown ptr: %#lx, %s:%u\n", (unsigned long)ptr, b_shorten_filename(file), line);
            for(i=0;i<history_size;i++) {
                if(g_alloc_state.history[i].alloc.mem == ptr) {
                    BDBG_P_PrintString("ptr: %#lx was previously allocated at %s:%u and freed at %s:%u\n", (unsigned long)ptr, b_shorten_filename(g_alloc_state.history[i].alloc.file), g_alloc_state.history[i].alloc.line, b_shorten_filename(g_alloc_state.history[i].free.file), g_alloc_state.history[i].free.line);
                }
            }
            BKNI_Fail();
        }
        B_TRACK_ALLOC_UNLOCK();
#if BKNI_GARBLE_MALLOC
        /* garble the freed block (size==0 for untracked pointers, so this
         * writes nothing in that case) to flush out use-after-free */
        b_memset32(ptr, size, 0xDA7ADEAD);
#endif
    } else {
        BDBG_P_PrintString("BKNI_Free of NULL ptr: %#lx, %s:%u\n", (unsigned long)ptr, b_shorten_filename(file), line);
        BKNI_Fail();
    }
    B_TRACK_ALLOC_FREE(ptr);
    return;
}
398
399
400
401#define BKNI_DUMP_SIZE_THRESHOLD        (1024)
402
403static void
404BKNI_DumpMallocs_Size(size_t threshold)
405{
406    unsigned i;
407    bool header;
408    struct BKNI_TrackAllocEntry *entries = g_alloc_state.table;
409
410    B_TRACK_ALLOC_LOCK();
411    for (header=false,i=0;entries && i<g_alloc_state.table_size;i++) {
412        if ((entries[i].mem) &&
413            (entries[i].size > threshold)) {
414            if(!header) {
415                header=true;
416                BDBG_P_PrintString("BKNI_Malloc(%s) report:\n%10s, %7s, filename:line\n", B_TRACK_ALLOC_OS, "address", "size");
417            }
418            BDBG_P_PrintString("%#10lx, %7u, %s:%u\n",
419                (unsigned long)entries[i].mem,
420                entries[i].size,
421                b_shorten_filename(entries[i].file),
422                entries[i].line);
423        }
424    }
425    B_TRACK_ALLOC_UNLOCK();
426    BDBG_P_PrintString("BKNI_Malloc(%s): allocated:%u(%u) freed:%u(%u) peak:%u\n", B_TRACK_ALLOC_OS, g_alloc_state.allocated, g_alloc_state.allocated_cnt, g_alloc_state.freed, g_alloc_state.freed_cnt, g_alloc_state.peak);
427    return;
428}
429
430void
431BKNI_DumpMallocs(void)
432{
433    BKNI_DumpMallocs_Size(BKNI_DUMP_SIZE_THRESHOLD);
434}
435
/* Look up tracking information for 'mem' and fill '*entry'.  Live
 * allocations are searched first; if not found, the history FIFO of freed
 * blocks is scanned (entry->alive reports which case matched, and
 * free_file/free_line are filled for freed blocks).  Returns BERR_SUCCESS
 * when information was found, BERR_NOT_SUPPORTED otherwise. */
BERR_Code
BKNI_GetMallocEntryInfo( const void *mem, struct BKNI_MallocEntryInfo *entry)
{
    const struct BKNI_TrackAllocEntry *allocated;

    /* assume 'mem' is still live until proven otherwise */
    entry->free_file = NULL;
    entry->free_line = 0;
    entry->alive = true;
    B_TRACK_ALLOC_LOCK();
    allocated = BKNI_P_GetTrackAllocEntry(mem);
    if(!allocated) {
        /* not live - it may have been freed recently; scan the history FIFO */
        unsigned i;
        for(i=0;i<sizeof(g_alloc_state.history)/sizeof(g_alloc_state.history[0]);i++) {
            if(g_alloc_state.history[i].alloc.mem == mem) {
                allocated = &g_alloc_state.history[i].alloc;
                entry->free_file = g_alloc_state.history[i].free.file;
                entry->free_line = g_alloc_state.history[i].free.line;
                entry->alive = false;
                break;
            }
        }
    }
    if(allocated) {
        entry->mem = allocated->mem;
        entry->size = allocated->size;
        entry->malloc_file = allocated->file;
        entry->malloc_line = allocated->line;
    }
    B_TRACK_ALLOC_UNLOCK();
    if(allocated) {
        /* shorten paths outside the lock; b_shorten_filename(NULL) yields
         * "unknown", so free_file is safe even for live entries */
        entry->malloc_file = b_shorten_filename(entry->malloc_file);
        entry->free_file = b_shorten_filename(entry->free_file);
        return BERR_SUCCESS;
    }
    return BERR_NOT_SUPPORTED;
}
472
#undef BKNI_Malloc
/* Untagged entry point: forwards to the tagged variant with no source
 * location (file=NULL, line=0). */
void *
BKNI_Malloc(size_t size)
{
    return BKNI_Malloc_tagged(size, NULL, 0);
}
479
#undef BKNI_Free
/* Untagged entry point: forwards to the tagged variant with no source
 * location (file=NULL, line=0). */
void
BKNI_Free(void *ptr)
{
    BKNI_Free_tagged(ptr, NULL, 0);
}
487
488static void
489BKNI_P_TrackAlloc_Init(void)
490{
491    b_memset32(&g_alloc_state, sizeof(g_alloc_state), 0);
492    g_alloc_state.table_size = 4096;
493    g_alloc_state.table = BKNI_P_GetTrackAllocEntry_resize(NULL, 0, g_alloc_state.table_size);
494    return;
495}
496
497static void
498BKNI_P_TrackAlloc_Uninit(void)
499{
500    BKNI_DumpMallocs_Size(0);
501    if(g_alloc_state.table) {
502        B_TRACK_ALLOC_FREE(g_alloc_state.table);
503        g_alloc_state.table = NULL;
504    }
505    return;
506}
507
508#else /* BKNI_TRACK_MALLOCS */
509
/* Tracking disabled: thin passthrough to the platform allocator. */
void *
BKNI_Malloc(size_t size)
{
    ASSERT_NOT_CRITICAL();
    return B_TRACK_ALLOC_ALLOC(size);
}
518
/* Tracking disabled: thin passthrough to the platform free routine. */
void
BKNI_Free(void *ptr)
{
    ASSERT_NOT_CRITICAL();
    B_TRACK_ALLOC_FREE(ptr);
}
526
/* Tracking disabled: the source-location tag is accepted but ignored. */
void *
BKNI_Malloc_tagged(size_t size, const char *file, unsigned line)
{
    BSTD_UNUSED(line);
    BSTD_UNUSED(file);
    return BKNI_Malloc(size);
}
534
/* Tracking disabled: the source-location tag is accepted but ignored. */
void
BKNI_Free_tagged(void *ptr, const char *file, unsigned line)
{
    BSTD_UNUSED(line);
    BSTD_UNUSED(file);
    BKNI_Free(ptr);
}
543
544BERR_Code BKNI_GetMallocEntryInfo( const void *mem, struct BKNI_MallocEntryInfo *entry)
545{
546    BSTD_UNUSED(mem);
547    BSTD_UNUSED(entry);
548    return BERR_NOT_SUPPORTED;
549}
550
/* Tracking disabled: nothing to initialize. */
static void
BKNI_P_TrackAlloc_Init(void)
{
}
556
/* Tracking disabled: nothing to tear down. */
static void
BKNI_P_TrackAlloc_Uninit(void)
{
}
562
/* Tracking disabled: there is nothing to dump. */
void
BKNI_DumpMallocs(void)
{
}
568
569#endif /* BKNI_TRACK_MALLOCS */
570
Note: See TracBrowser for help on using the repository browser.