source: svn/trunk/newcon3bcm2_21bu/magnum/basemodules/kni/linuxuser/bkni.c @ 2

/***************************************************************************
 *     Copyright (c) 2003-2011, Broadcom Corporation
 *     All Rights Reserved
 *     Confidential Property of Broadcom Corporation
 *
 *  THIS SOFTWARE MAY ONLY BE USED SUBJECT TO AN EXECUTED SOFTWARE LICENSE
 *  AGREEMENT  BETWEEN THE USER AND BROADCOM.  YOU HAVE NO RIGHT TO USE OR
 *  EXPLOIT THIS MATERIAL EXCEPT SUBJECT TO THE TERMS OF SUCH AN AGREEMENT.
 *
 * $brcm_Workfile: bkni.c $
 * $brcm_Revision: Hydra_Software_Devel/79 $
 * $brcm_Date: 10/19/11 2:40p $
 *
 * Module Description:
 *
 * Implementation of the Magnum KNI for user space Linux applications.
 *
 * Revision History:
 *
 * $brcm_Log: /magnum/basemodules/kni/linuxuser/bkni.c $
 *
 * Hydra_Software_Devel/79   10/19/11 2:40p erickson
 * CDFEDEMOD-24: reduce minimum BKNI_WaitForEvent timeout to 10 msec and
 * test for signal even after timeout
 *
 * Hydra_Software_Devel/78   6/6/11 3:27p vsilyaev
 * SW7405-4477: Routed all debug output through buffer and use external
 * application to extract and print debug output
 *
 * Hydra_Software_Devel/77   5/27/11 9:42a erickson
 * SW7231-179: disable NPTL for android
 *
 * Hydra_Software_Devel/76   5/18/11 1:10p erickson
 * SW7125-949: remove dependency on linux header files. hardcode NPTL
 * support on.
 *
 * Hydra_Software_Devel/75   4/22/11 1:59p erickson
 * SW3548-2837: fix BKNI_DEBUG_CS_TIMING test
 *
 * Hydra_Software_Devel/74   8/10/10 1:10p erickson
 * SW7405-4294: added BKNI_CS_PRIORITY_ESCALATION, default off
 *
 * Hydra_Software_Devel/73   3/17/10 2:06p erickson
 * SW3548-2837: add comments to prevent future hacking
 *
 * Hydra_Software_Devel/72   3/16/10 4:17p erickson
 * SW3548-2837: add BKNI_DEBUG_CS_TIMING feature, default off
 *
 * Hydra_Software_Devel/71   3/10/10 3:00p mward
 * SW7400-2712:  Add annotation to indicate to Coverity that BKNI_Fail()
 * is a "kill path".
 *
 * Hydra_Software_Devel/70   2/17/10 11:19a vsilyaev
 * SW7468-112: include <linux/version.h> only for MIPS platform
 *
 * Hydra_Software_Devel/69   5/15/09 12:14p jtna
 * PR54398: redefine B_TRACK_ALLOC_LOCK for linuxuser and remove return
 * value check
 *
 * Hydra_Software_Devel/68   4/30/09 3:45p erickson
 * PR53778: added #undef instead of #if. allows interoperability between
 * debug and non debug code.
 *
 * Hydra_Software_Devel/67   4/17/09 5:41p vsilyaev
 * PR 53778: Moved malloc tracking code shareable between OS'es
 *
 * Hydra_Software_Devel/66   4/17/09 5:07p vsilyaev
 * PR 53778: Try to detect double-free blocks and print location where
 * they were allocated and freed
 *
 * Hydra_Software_Devel/65   4/17/09 4:08p vsilyaev
 * PR 53778: Fixed detection of double-free'd blocks
 *
 * Hydra_Software_Devel/64   4/16/09 6:55p vsilyaev
 * PR 53778: When resizing tracking pool, print top Malloc users, perhaps
 * it points to the problematic code
 *
 * Hydra_Software_Devel/63   4/16/09 5:13p vsilyaev
 * PR 53778: Use dynamically allocated memory to keep track of allocated
 * blocks
 *
 * Hydra_Software_Devel/62   4/16/09 11:54a jtna
 * PR51960: BKNI_GARBLE_MALLOC now enabled by default
 *
 * Hydra_Software_Devel/61   4/16/09 10:37a erickson
 * PR53778: temp increase of tracked mallocs to 16K
 *
 * Hydra_Software_Devel/60   4/10/09 2:53p vsilyaev
 * PR 53778: Fixed warnings
 *
 * Hydra_Software_Devel/59   4/9/09 1:35p vsilyaev
 * PR 53778: Keep a history of free'd objects for better debug facilities
 *
 * Hydra_Software_Devel/58   4/7/09 12:42p jtna
 * PR53778: back out changes. bkni_multi.h already has the definitions
 *
 * Hydra_Software_Devel/57   4/6/09 5:37p jtna
 * PR53778: add BKNI_CreateMutex/DestroyMutex() for BKNI_TRACK_MALLOCS=1
 *
 * Hydra_Software_Devel/56   4/2/09 12:49p erickson
 * PR53778: remove bad header file
 *
 * Hydra_Software_Devel/55   4/2/09 12:29p erickson
 * PR53779: fixed race conditions in BKNI_SetEvent and BKNI_DestroyEvent
 *
 * Hydra_Software_Devel/54   4/2/09 11:29a erickson
 * PR53778: extend BKNI_TRACK_MALLOCS to events and mutexes
 *
 * Hydra_Software_Devel/53   2/2/09 10:16a vsilyaev
 * PR 51741: Don't use CriticalSection when tracking dynamically
 * allocated memory
 *
 * Hydra_Software_Devel/52   1/30/09 9:23p vsilyaev
 * PR 51471: Improved report on active allocations, added option to garble
 * allocated data (to track use of uninitialized and/or freed data)
 *
 * Hydra_Software_Devel/51   1/30/09 2:59p vsilyaev
 * PR 42495: Print shorter names
 *
 * Hydra_Software_Devel/50   1/30/09 12:06p vsilyaev
 * PR 42495: Clear alloc statistics on BKNI_Uninit
 *
 * Hydra_Software_Devel/49   1/29/09 8:12p vsilyaev
 * PR 42495: Improved tracking of memory allocation
 *
 * Hydra_Software_Devel/48   1/20/09 3:17p erickson
 * PR50264: back out change for BKNI_WaitForEvent. add better comment.
 *
 * Hydra_Software_Devel/47   12/29/08 1:40p erickson
 * PR50746: added BKNI_Printf to BKNI_Fail to clarify purpose of the
 * intentional segfault
 *
 * Hydra_Software_Devel/46   12/29/08 10:40a erickson
 * PR50742: fix spelling of BKNI_AssertIsrContext
 *
 * Hydra_Software_Devel/45   12/11/08 2:38p erickson
 * PR50264: change minimum BKNI_WaitForEvent and BKNI_WaitForGroup from
 * 100 to 10
 *
 * Hydra_Software_Devel/44   11/25/08 5:00p vishk
 * PR 49401: Coverity Defect ID:11677 UNINIT bkni.c Product=93549
 * PR 49400: Coverity Defect ID:11678 UNINIT bkni.c Product=93549
 *
 * Hydra_Software_Devel/43   11/3/08 5:04p vsilyaev
 * PR 48347: Consumed EINTR return code in the WaitForEvent/WaitForGroup
 *
 * Hydra_Software_Devel/42   10/5/08 9:29p erickson
 * PR42329: increase default BKNI_MAX_ALLOCS and allow override
 *
 * Hydra_Software_Devel/41   9/23/08 5:26p mward
 * PR44643: For BKNI_WaitForGroup with timeout of 0, return result.
 * Remove assert added for debug.
 *
 * Hydra_Software_Devel/40   9/18/08 3:27p erickson
 * PR46838: merge
 *
 * Hydra_Software_Devel/PR46838/1   9/17/08 4:56p dyzhang
 * PR46838: remove malloc dump prints from each malloc. And give a
 * threshold of memory size to filter too many prints
 *
 * Hydra_Software_Devel/39   8/21/08 4:43p vsilyaev
 * PR 32280: Added BKNI_ASSERT_ISR_CONTEXT
 *
 * Hydra_Software_Devel/38   7/31/08 11:37a erickson
 * PR45221: added BDBG_OBJECT to help debug mutex, event and eventgroup
 * allocation errors
 *
 * Hydra_Software_Devel/37   7/16/08 11:19a erickson
 * PR44853: clean up -Wstrict-prototypes warning
 *
 * Hydra_Software_Devel/36   7/10/08 4:18p erickson
 * PR44643: some linux kernels do not have clock_gettime. also, fixed long
 * long typecast warnings for 64 bit systems.
 *
 * Hydra_Software_Devel/35   7/10/08 10:04a erickson
 * PR44643: preserve use of gettimeofday for Linux 2.6.12 systems (non-
 * NPTL)
 *
 * Hydra_Software_Devel/34   7/9/08 2:38p erickson
 * PR44643: fix BKNI_WaitForGroup with timeout of 0
 *
 * Hydra_Software_Devel/33   7/9/08 12:11p erickson
 * PR44643: switch to clock_gettime so we can tolerate settimeofday
 *
 * Hydra_Software_Devel/32   5/27/08 4:28p erickson
 * PR42929: Print BDBG_WRN for negative timeouts that are not
 * BKNI_INFINITE. It's highly likely to be an app bug. See comment in
 * code.
 *
 * Hydra_Software_Devel/31   5/8/08 9:06a erickson
 * PR42329: call BKNI_DumpMallocs if BKNI_Malloc is going to return NULL
 *
 * Hydra_Software_Devel/30   5/7/08 10:54a erickson
 * PR42495: add BKNI_TRACK_MALLOCS feature, defaulted off
 *
 * Hydra_Software_Devel/29   4/9/08 11:06a mward
 * PR41378: Use tv_nsec >= 1000000000; NPTL pthread_cond_timedwait()
 * rejects tv_nsec = 1000000000.
 *
 * Hydra_Software_Devel/28   3/17/08 9:53a erickson
 * PR40592: check if pthread_self is working
 *
 * Hydra_Software_Devel/27   11/27/07 10:33a jgarrett
 * PR 37550: Coverity fixes
 *
 * Hydra_Software_Devel/26   1/23/07 1:50p erickson
 * PR27252: If BKNI_SetEvent is called (e.g. from isr) while
 * BKNI_RemoveEventGroup is pending, the group mutex will stay locked.
 * SetEvent needs to guarantee that it will unlock any mutex it locked.
 *
 * Hydra_Software_Devel/25   10/10/06 5:03p jgarrett
 * PR 24626: Adding assertions when KNI code is called from the wrong
 * context
 *
 * Hydra_Software_Devel/PR24626/1   10/5/06 3:08p jgarrett
 * PR 24626: Adding assertions when KNI code is called from the wrong
 * context
 *
 * Hydra_Software_Devel/24   6/29/06 4:07p vsilyaev
 * PR 22357: Fixed signal interaction in BKNI_Sleep
 *
 * Hydra_Software_Devel/23   4/12/06 4:46p jgarrett
 * PR 20873: For 7400, temporarily making BKNI_Fail() cause a segmentation
 * fault instead of calling abort().
 *
 * Hydra_Software_Devel/22   7/28/05 10:55a vsilyaev
 * PR14028: Fixed KNI_Delay so it has a chance to work.
 *
 * Hydra_Software_Devel/21   2/7/05 10:34a erickson
 * PR14028: fixed compiler warning
 *
 * Hydra_Software_Devel/20   2/7/05 9:23a erickson
 * PR14028: fix impl of KNI_Delay to use busy loop, not scheduler sleep
 *
 * Hydra_Software_Devel/19   2/4/05 9:26a erickson
 * PR13046: fixed return code check
 *
 * Hydra_Software_Devel/18   10/27/04 10:50a vsilyaev
 * PR 13046: Fixed leak of group signal in the SetEvent function.
 *
 * Hydra_Software_Devel/17   10/19/04 4:19p vsilyaev
 * PR 13046: Added precondition in BKNI_WaitForGroup.
 *
 * Hydra_Software_Devel/16   7/9/04 8:53a erickson
 * PR11771: convert timeout WRN to MSG
 *
 * Hydra_Software_Devel/15   3/11/04 8:06p vsilyaev
 * PR 9736: Fixed compile problem in the last commit.
 *
 * Hydra_Software_Devel/14   3/11/04 7:19p vsilyaev
 * PR 9736: Fixed potential synchronization issues between Event and
 * EventGroup.
 *
 * Hydra_Software_Devel/13   2/19/04 11:14a vsilyaev
 * PR 9736: Improved error propagation in the BKNI_RemoveEventGroup
 * function.
 *
 * Hydra_Software_Devel/12   2/12/04 9:31p vsilyaev
 * PR 9736: Don't try to acquire mutex during destroy.
 *
 * Hydra_Software_Devel/11   2/12/04 10:52a erickson
 * PR9736: initialize group to NULL at create
 *
 * Hydra_Software_Devel/10   2/11/04 10:20p vsilyaev
 * PR 9736: Added implementation for the event multiplexing.
 *
 * Hydra_Software_Devel/9   1/13/04 6:16p vsilyaev
 * PR 9290: Add code to lower-bound the timeout to 200ms.
 *
 * Hydra_Software_Devel/8   10/17/03 9:01a vsilyaev
 * Added standard header.
 *
 ***************************************************************************/

#include "bstd.h"
#include "bkni.h"
#include "bkni_multi.h"
#include "bkni_metrics.h"
#include "bkni_event_group.h"
#include "blst_list.h"
#include "bdbg_priv.h"

#include <unistd.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <errno.h>
#include <pthread.h>
#include <time.h>
#include <string.h>

BDBG_MODULE(kernelinterface);

BDBG_OBJECT_ID(BKNI_EventGroup);
BDBG_OBJECT_ID(BKNI_Event);
BDBG_OBJECT_ID(BKNI_Mutex);

struct BKNI_MutexObj
{
    BDBG_OBJECT(BKNI_Mutex)
    pthread_mutex_t mutex;
};

void * BKNI_Malloc_tagged(size_t size, const char *file, unsigned line);
void BKNI_Free_tagged(void *ptr, const char *file, unsigned line);
static unsigned long BKNI_P_GetMicrosecondTick(void);


#if defined(__mips__)
/* avoid including linux kernel header files here because this is linuxuser.
we support NPTL in all versions of linux, so I'm hardcoding it on.
Android's bionic C does not have these NPTL APIs. */
#if !defined(B_REFSW_ANDROID)
#define HAS_NPTL 1
#endif
#endif /* __mips__ */

#if !HAS_NPTL
#include <sys/time.h>
#endif

struct BKNI_GroupObj
{
    BDBG_OBJECT(BKNI_EventGroup)
    BLST_D_HEAD(group, BKNI_EventObj) members;
    pthread_mutex_t lock;           /* mutex protecting the signal flag and condition variable */
    pthread_cond_t  cond;           /* condition variable to wake up from event */
};

struct BKNI_EventObj
{
    BDBG_OBJECT(BKNI_Event)
    BLST_D_ENTRY(BKNI_EventObj) list;
    struct BKNI_GroupObj *group;
    pthread_mutex_t lock;           /* mutex protecting the signal flag and condition variable */
    pthread_cond_t  cond;           /* condition variable to wake up from event */
    bool signal;
};

static pthread_mutex_t g_csMutex = PTHREAD_MUTEX_INITIALIZER;

#if BDBG_DEBUG_BUILD

static pthread_t g_csOwner;

#define SET_CRITICAL() do { g_csOwner = pthread_self(); } while (0)
#define CLEAR_CRITICAL() do { g_csOwner = (pthread_t)0; } while (0)
#define CHECK_CRITICAL() ( g_csOwner == pthread_self() )

#define ASSERT_CRITICAL() do \
{\
    if ( !CHECK_CRITICAL() )\
    {\
        BDBG_P_PrintString("Error, must be in critical section to call %s\n", __FUNCTION__);\
        BKNI_Fail();\
    }\
} while (0)

#define ASSERT_NOT_CRITICAL() do \
{\
    if ( CHECK_CRITICAL() )\
    {\
        BDBG_P_PrintString("Error, must not be in critical section to call %s\n", __FUNCTION__);\
        BKNI_Fail();\
    }\
} while (0)

#else

#define ASSERT_CRITICAL() (void)0
#define ASSERT_NOT_CRITICAL() (void)0
#define SET_CRITICAL() (void)0
#define CLEAR_CRITICAL() (void)0
#define CHECK_CRITICAL() 0

#endif

#if BKNI_TRACK_MALLOCS
static pthread_mutex_t g_alloc_state_mutex = PTHREAD_MUTEX_INITIALIZER;
#endif

#define B_TRACK_ALLOC_LOCK() do {if (pthread_mutex_lock(&g_alloc_state_mutex)) {BKNI_Fail();}}while(0)
#define B_TRACK_ALLOC_UNLOCK() do {if (pthread_mutex_unlock(&g_alloc_state_mutex)) {BKNI_Fail();}}while(0)
#define B_TRACK_ALLOC_ALLOC(size) malloc(size)
#define B_TRACK_ALLOC_FREE(ptr) free(ptr)
#define B_TRACK_ALLOC_OS "linuxuser"

#include "bkni_track_mallocs.inc"
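
/* How the tracking hooks above are typically consumed (illustrative sketch,
   not a quote of bkni.h): when BKNI_TRACK_MALLOCS is enabled, the public
   allocation macros presumably expand to the _tagged variants so that every
   allocation records its call site, e.g.

       #define BKNI_Malloc(size) BKNI_Malloc_tagged(size, __FILE__, __LINE__)
       #define BKNI_Free(ptr)    BKNI_Free_tagged(ptr, __FILE__, __LINE__)

   bkni_track_mallocs.inc then uses B_TRACK_ALLOC_LOCK/UNLOCK around its
   bookkeeping and B_TRACK_ALLOC_ALLOC/FREE for the bookkeeping storage. */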


BERR_Code BKNI_Init(void)
{
#if BDBG_DEBUG_BUILD
    if (pthread_self() == 0) {
        /* If this fails, a library outside of magnum has failed. KNI requires this to work. */
        BKNI_Fail();
    }
#endif
    BKNI_P_TrackAlloc_Init();

    return BERR_SUCCESS;
}

/* coverity[+kill]  */
void BKNI_Fail(void)
{
    /* Dereferencing 0 causes a SIGSEGV, which will usually produce a core dump. */
    BDBG_P_PrintString("BKNI_Fail is intentionally causing a segfault. Please inspect any prior error messages or get a core dump stack trace to determine the cause of failure.\n");
    *(volatile unsigned char *)0;
}

BERR_Code BKNI_CreateMutex_tagged(BKNI_MutexHandle *handle, const char *file, int line)
{
    ASSERT_NOT_CRITICAL();

    *handle = (BKNI_MutexHandle)BKNI_Malloc_tagged(sizeof(**handle), file, line);
    if (!*handle) {
        return BERR_TRACE(BERR_OS_ERROR);
    }
    BDBG_OBJECT_SET(*handle, BKNI_Mutex);

    /* WARNING: Do not make BKNI_MutexHandle a recursive mutex. The usual motivation for doing this is to allow recursive calls back into
    Nexus or Magnum from custom callouts. That would be a violation of Nexus and Magnum architecture and will cause catastrophic failure.
    If your application needs its own recursive mutex, please create your own function and leave this unmodified. */
    if (pthread_mutex_init(&(*handle)->mutex, NULL)) {
        BDBG_OBJECT_DESTROY(*handle, BKNI_Mutex);
        BKNI_Free_tagged(*handle, file, line); /* release with the tagged free so the malloc tracker stays consistent */
        return BERR_TRACE(BERR_OS_ERROR);
    } else {
        return BERR_SUCCESS;
    }
}
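
/* If an application needs a recursive lock, the warning above says to build
   one outside of KNI. A minimal sketch on plain pthreads (hypothetical names,
   not part of this module):

       #include <pthread.h>

       static pthread_mutex_t app_lock;

       static int app_lock_init(void)
       {
           pthread_mutexattr_t attr;
           if (pthread_mutexattr_init(&attr)) return -1;
           if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE)) return -1;
           return pthread_mutex_init(&app_lock, &attr);
       }

   This leaves BKNI_MutexHandle non-recursive, as required. */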

void
BKNI_DestroyMutex_tagged(BKNI_MutexHandle handle, const char *file, int line)
{
    ASSERT_NOT_CRITICAL();
    BDBG_OBJECT_ASSERT(handle, BKNI_Mutex);

    pthread_mutex_destroy(&handle->mutex);
    BDBG_OBJECT_DESTROY(handle, BKNI_Mutex);
    BKNI_Free_tagged(handle, file, line);
    return;
}

#undef BKNI_CreateMutex
BERR_Code BKNI_CreateMutex(BKNI_MutexHandle *handle)
{
    return BKNI_CreateMutex_tagged(handle, NULL, 0);
}

#undef BKNI_DestroyMutex
void BKNI_DestroyMutex(BKNI_MutexHandle handle)
{
    BKNI_DestroyMutex_tagged(handle, NULL, 0);
}

BERR_Code
BKNI_TryAcquireMutex(BKNI_MutexHandle handle)
{
    int rc;

    ASSERT_NOT_CRITICAL();
    BDBG_OBJECT_ASSERT(handle, BKNI_Mutex);

    rc = pthread_mutex_trylock(&handle->mutex);
    if (rc==0) {
        return BERR_SUCCESS;
    } else if (rc==EBUSY) {
        return BERR_TIMEOUT;
    } else {
        return BERR_TRACE(BERR_OS_ERROR);
    }
}

BERR_Code
BKNI_AcquireMutex(BKNI_MutexHandle handle)
{
    ASSERT_NOT_CRITICAL();
    BDBG_OBJECT_ASSERT(handle, BKNI_Mutex);

    if (pthread_mutex_lock(&handle->mutex))
        return BERR_TRACE(BERR_OS_ERROR);
    else
        return BERR_SUCCESS;
}

void
BKNI_ReleaseMutex(BKNI_MutexHandle handle)
{
    ASSERT_NOT_CRITICAL();
    BDBG_OBJECT_ASSERT(handle, BKNI_Mutex);

    if (pthread_mutex_unlock(&handle->mutex)) {
        BDBG_ERR(("pthread_mutex_unlock failed"));
        BDBG_ASSERT(false);
    }
    return;
}

#if BKNI_DEBUG_CS_TIMING
static unsigned long g_csTimeStart;
static const char *g_csFile;
static int g_csLine;
#endif

/* BKNI_CS_PRIORITY_ESCALATION is a simple form of priority inheritance.
In linux user mode, critical sections can be pre-empted and priority inversion problems can result.
Instead of actually inheriting, this sets any thread entering a critical section to the highest pthread priority & policy.
It currently defaults off. */
/* #define BKNI_CS_PRIORITY_ESCALATION 1 */
#if BKNI_CS_PRIORITY_ESCALATION
static int g_cs_policy = -1;
static struct sched_param g_cs_sched_param;
#endif

void BKNI_EnterCriticalSection_tagged(const char *file, unsigned line)
{
#if BKNI_CS_PRIORITY_ESCALATION
    int local_policy;
    struct sched_param local_sched_param, new_sched_param;
#endif

    ASSERT_NOT_CRITICAL();

#if BKNI_CS_PRIORITY_ESCALATION
    pthread_getschedparam(pthread_self(), &local_policy, &local_sched_param);
    memcpy(&new_sched_param, &local_sched_param, sizeof(new_sched_param));
    new_sched_param.sched_priority = 99;

    /* Temporarily increase thread priority to highest. Do this before trying to lock the mutex. */
    pthread_setschedparam(pthread_self(), SCHED_FIFO, &new_sched_param);
#endif

    /* WARNING: Do not make g_csMutex a recursive mutex. The usual motivation for doing this is to allow ISR code to call
    into non-ISR code. That would be a violation of Magnum architecture and will cause catastrophic failure. If your application
    needs its own recursive critical section, please create your own function and leave this unmodified. */
    if (pthread_mutex_lock(&g_csMutex)!=0)
    {
        BDBG_ERR(("pthread_mutex_lock failed"));
        BDBG_ASSERT(false);
        return;
    }

#if BKNI_CS_PRIORITY_ESCALATION
    /* now that we have the CS mutex, we can store the local values into the global state */
    g_cs_policy = local_policy;
    g_cs_sched_param = local_sched_param;
#endif

    SET_CRITICAL();

#if BKNI_DEBUG_CS_TIMING
    g_csTimeStart = BKNI_P_GetMicrosecondTick();
    g_csFile = file;
    g_csLine = line;
#else
    BSTD_UNUSED(file);
    BSTD_UNUSED(line);
#endif
}

void
BKNI_LeaveCriticalSection_tagged(const char *file, unsigned line)
{
#if BKNI_DEBUG_CS_TIMING
    uint32_t currentCount, elapsedCount;
#endif
#if BKNI_CS_PRIORITY_ESCALATION
    /* copy escalated thread priority/policy into local storage before releasing the CS mutex */
    int local_policy = g_cs_policy;
    struct sched_param local_sched_param = g_cs_sched_param;
#endif

#if BKNI_DEBUG_CS_TIMING
    /* Snapshot time */
    currentCount = BKNI_P_GetMicrosecondTick();
    if ( currentCount >= g_csTimeStart )
    {
        elapsedCount = currentCount - g_csTimeStart;
    }
    else
    {
        /* the 32-bit microsecond tick wrapped around since EnterCriticalSection */
        elapsedCount = currentCount + (0xFFFFFFFFUL-g_csTimeStart);
    }
    if ( elapsedCount > 10 * 1000 ) /* 10 milliseconds */
    {
        BDBG_P_PrintString("Long CS detected (%u.%u ms).\nEntered: %s:%d\nLeaving %s:%d\n",
               elapsedCount/1000, elapsedCount%1000, g_csFile, g_csLine, file, line);
    }
#else
    BSTD_UNUSED(file);
    BSTD_UNUSED(line);
#endif

    ASSERT_CRITICAL();
    CLEAR_CRITICAL();

    if (pthread_mutex_unlock(&g_csMutex))
    {
        BDBG_ERR(("pthread_mutex_unlock failed"));
        BDBG_ASSERT(false);
    }

#if BKNI_CS_PRIORITY_ESCALATION
    /* restore this thread's settings from before the EnterCriticalSection */
    pthread_setschedparam(pthread_self(), local_policy, &local_sched_param);
#endif

    return;
}

#if !BKNI_DEBUG_CS_TIMING
void BKNI_EnterCriticalSection(void) { BKNI_EnterCriticalSection_tagged(NULL, 0); }
void BKNI_LeaveCriticalSection(void) { BKNI_LeaveCriticalSection_tagged(NULL, 0); }
#endif
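
/* Typical caller pattern for the critical-section API (illustrative only;
   shared_state is a hypothetical variable also touched by *_isr code):

       BKNI_EnterCriticalSection();
       shared_state++;
       BKNI_LeaveCriticalSection();

   Because g_csMutex is a single global, non-recursive lock, entering the
   critical section twice from one thread asserts in debug builds and
   deadlocks otherwise. */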

int
BKNI_Printf(const char *fmt, ...)
{
    va_list arglist;
    int rc;

    va_start( arglist, fmt );
    rc = vfprintf(stderr, fmt, arglist);
    va_end(arglist);

    return rc;
}


int
BKNI_Snprintf(char *str, size_t len, const char *fmt, ...)
{
    va_list arglist;
    int rc;

    va_start( arglist, fmt );
    rc = vsnprintf(str, len, fmt, arglist);
    va_end(arglist);

    return rc;
}

int
BKNI_Vprintf(const char *fmt, va_list ap)
{
    return vfprintf(stderr, fmt, ap);
}

static unsigned long BKNI_P_GetMicrosecondTick(void)
{
#if !HAS_NPTL
    int rc;
    struct timeval now;
    rc = gettimeofday(&now, NULL);
    if (rc) {
        rc = BERR_TRACE(BERR_OS_ERROR);
        return 0;
    }
    return now.tv_sec * 1000000 + now.tv_usec;
#else
    int rc;
    struct timespec now;
    /* It's ok to use clock_gettime even without NPTL. */
    rc = clock_gettime(CLOCK_MONOTONIC, &now);
    if (rc) {
        rc = BERR_TRACE(BERR_OS_ERROR);
        return 0;
    }
    return now.tv_sec * 1000000 + now.tv_nsec / 1000;
#endif
}

/**
BKNI_Delay impl notes:
This is an incredibly inefficient implementation...which is exactly
the point. Because the linux scheduler has a 10 millisecond clock tick,
this function should not hit the scheduler. It must use a busy loop.
sleep and usleep use the scheduler. nanosleep will use the scheduler
unless the pthread priority is high, which we cannot assume in this function.
Therefore a busy loop with a fine-grain time syscall does the job.
*/
void
BKNI_Delay(unsigned int microsec)
{
    unsigned long start;
    unsigned long diff;
    start = BKNI_P_GetMicrosecondTick();
    do {
        diff = BKNI_P_GetMicrosecondTick() - start;
    } while (diff < microsec);
    return;
}
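
/* Choosing between the two delay primitives (illustrative; note that the
   units differ):

       BKNI_Delay(50);    burns CPU for ~50 microseconds without touching
                          the scheduler; suitable for short hardware waits
       BKNI_Sleep(50);    yields via nanosleep for ~50 milliseconds; may
                          oversleep by a scheduler quantum
*/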

BERR_Code
BKNI_Sleep(unsigned int millisec)
{
    struct timespec delay;
    struct timespec rem;
    int rc;

    ASSERT_NOT_CRITICAL();

    delay.tv_sec = millisec/1000;
    delay.tv_nsec = 1000 * 1000 * (millisec%1000);

    for(;;) {
        rc = nanosleep(&delay, &rem); /* [u]sleep can't be used because it uses SIGALRM */
        if (rc!=0) {
            if (errno==EINTR) {
                delay = rem; /* sleep again */
                continue;
            }
            return BERR_TRACE(BERR_OS_ERROR);
        }
        break; /* done */
    }

    return BERR_SUCCESS;
}


BERR_Code
BKNI_CreateEvent_tagged(BKNI_EventHandle *pEvent, const char *file, int line)
{
    BKNI_EventHandle event;
    int rc;
    BERR_Code result=BERR_SUCCESS;
    /* coverity[var_decl: FALSE] */
    pthread_condattr_t attr;

    ASSERT_NOT_CRITICAL();

    event = BKNI_Malloc_tagged(sizeof(*event), file, line);
    *pEvent = event;
    if ( !event) {
        result = BERR_TRACE(BERR_OS_ERROR);
        goto err_no_memory;
    }
    BDBG_OBJECT_SET(event, BKNI_Event);

    rc = pthread_mutex_init (&event->lock, NULL /* default attributes */);
    if (rc!=0) {
        result = BERR_TRACE(BERR_OS_ERROR);
        goto err_mutex;
    }

    /* coverity[uninit_use_in_call: FALSE] */
    rc = pthread_condattr_init(&attr);
    if (rc!=0) {
        result = BERR_TRACE(BERR_OS_ERROR);
        goto err_condvar;
    }

#if HAS_NPTL
    rc = pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
    if (rc!=0) {
        result = BERR_TRACE(BERR_OS_ERROR);
        goto err_condvar;
    }
#endif

    rc = pthread_cond_init( &event->cond, &attr);
    if (rc!=0) {
        result = BERR_TRACE(BERR_OS_ERROR);
        goto err_condvar;
    }
    event->signal = false;
    event->group = NULL;

    return result;

err_condvar:
    pthread_mutex_destroy(&event->lock);
err_mutex:
    BDBG_OBJECT_DESTROY(event, BKNI_Event);
    BKNI_Free_tagged(event, file, line); /* release with the tagged free so the malloc tracker stays consistent */
err_no_memory:
    return result;
}

void
BKNI_DestroyEvent_tagged(BKNI_EventHandle event, const char *file, int line)
{
    int rc;
    BKNI_EventGroupHandle group;

    ASSERT_NOT_CRITICAL();
    BDBG_OBJECT_ASSERT(event, BKNI_Event);
    group = event->group;
    /* At this point, we may have been removed from the group and event->group is NULL.
    This would be poor application code, but KNI should protect itself. */

    if (group) {
        BDBG_WRN(("Event %#x still in the group %#x, removing it", (unsigned)(unsigned long)event, (unsigned)(unsigned long)group));
        rc = pthread_mutex_lock(&group->lock);
        if (rc!=0) {
            BDBG_ERR(("pthread_mutex_lock %d", rc));
            BDBG_ASSERT(false);
        }
        /* if the group does not match, then the caller needs to fix their code. we can't have an event being added & removed from various
        groups and being destroyed at the same time. */
        BDBG_ASSERT(event->group == group);
        BLST_D_REMOVE(&group->members, event, list);
        pthread_mutex_unlock(&group->lock);
    }
    rc = pthread_mutex_destroy(&event->lock);
    if (rc!=0) {
        BDBG_ERR(("pthread_mutex_destroy: %d", rc));
        BDBG_ASSERT(false);
    }
    rc = pthread_cond_destroy(&event->cond);
    if (rc!=0) {
        BDBG_ERR(("pthread_cond_destroy: %d", rc));
        BDBG_ASSERT(false);
    }
    BDBG_OBJECT_DESTROY(event, BKNI_Event);
    BKNI_Free_tagged(event, file, line);
    return;
}

#undef BKNI_CreateEvent
BERR_Code BKNI_CreateEvent(BKNI_EventHandle *pEvent)
{
    return BKNI_CreateEvent_tagged(pEvent, NULL, 0);
}

#undef BKNI_DestroyEvent
void BKNI_DestroyEvent(BKNI_EventHandle event)
{
    BKNI_DestroyEvent_tagged(event, NULL, 0);
}

/* return a timespec which is the current time plus an increment */
static int BKNI_P_SetTargetTime(struct timespec *target, int timeoutMsec)
{
    int rc;
#if !HAS_NPTL
    /* Unless pthread can set CLOCK_MONOTONIC, we cannot use clock_gettime(CLOCK_MONOTONIC). This is only available with NPTL linux. */
    struct timeval now;
    rc = gettimeofday(&now, NULL);
    if (rc!=0) {
        return BERR_TRACE(BERR_OS_ERROR);
    }
    target->tv_nsec = now.tv_usec * 1000 + (timeoutMsec%1000)*1000000;
    target->tv_sec = now.tv_sec + (timeoutMsec/1000);
    if (target->tv_nsec >= 1000000000) {
        target->tv_nsec -= 1000000000;
        target->tv_sec++;
    }
#else
    struct timespec now;
    rc = clock_gettime(CLOCK_MONOTONIC, &now);
    if (rc!=0) {
        return BERR_TRACE(BERR_OS_ERROR);
    }
    target->tv_nsec = now.tv_nsec + (timeoutMsec%1000)*1000000;
    target->tv_sec = now.tv_sec + (timeoutMsec/1000);
    if (target->tv_nsec >= 1000000000) {
        target->tv_nsec -= 1000000000;
        target->tv_sec++;
    }
#endif
    return 0;
}
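
/* Worked example of the carry logic above: with now = {tv_sec = 100,
   tv_nsec = 900000000} and timeoutMsec = 1500, tv_nsec becomes
   900000000 + 500*1000000 = 1400000000, which overflows one second, so the
   result is normalized to {tv_sec = 100 + 1 + 1 = 102, tv_nsec = 400000000}. */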

BERR_Code
BKNI_WaitForEvent(BKNI_EventHandle event, int timeoutMsec)
{
    int rc;
    BERR_Code result = BERR_SUCCESS;
    struct timespec target;

    if ( timeoutMsec != 0 )
    {
        ASSERT_NOT_CRITICAL();
    }
    BDBG_OBJECT_ASSERT(event, BKNI_Event);

    if (timeoutMsec!=0 && timeoutMsec!=BKNI_INFINITE) {
        if (timeoutMsec<0) {
            /* If your app is written to allow negative values to this function, then it's highly likely you would allow -1, which would
            result in an infinite hang. We recommend that you only pass positive values to this function unless you definitely mean BKNI_INFINITE. */
            BDBG_WRN(("BKNI_WaitForEvent given negative timeout. Possible infinite hang if timeout happens to be -1 (BKNI_INFINITE)."));
        }
        if (timeoutMsec<10) {
            timeoutMsec=10; /* This is used to achieve consistency between different OS's. */
        }
        rc = BKNI_P_SetTargetTime(&target, timeoutMsec);
        if (rc) {
            return BERR_TRACE(BERR_OS_ERROR);
        }
    }

    rc = pthread_mutex_lock(&event->lock);
    if (rc!=0) {
        return BERR_TRACE(BERR_OS_ERROR);
    }
    if (event->signal) {
        event->signal = false;
        goto done;
    }
    if (timeoutMsec == 0) { /* polling mode: do not wait at all */
        /* It is normal that BKNI_WaitForEvent could time out. Do not use BERR_TRACE. */
        result = BERR_TIMEOUT;
        goto done;
    }
    do {
        if (timeoutMsec == BKNI_INFINITE) {
            rc = pthread_cond_wait(&event->cond, &event->lock);
        } else {
            rc = pthread_cond_timedwait(&event->cond, &event->lock, &target);
            if (event->signal) {
                /* even if we timed out, if the signal was set, we succeed. this allows magnum to
                be resilient to large OS scheduler delays */
                result = BERR_SUCCESS;
                break;
            }
            if (rc==ETIMEDOUT) {
                BDBG_MSG(("BKNI_WaitForEvent(%#x): timeout", (unsigned)(unsigned long)event));
                result = BERR_TIMEOUT;
                goto done;
            }
        }
        if(rc==EINTR) {
            BDBG_MSG(("BKNI_WaitForEvent(%#x): interrupted", (unsigned)(unsigned long)event));
            continue;
        }
        if (rc!=0) {
            result = BERR_TRACE(BERR_OS_ERROR);
            goto done;
        }
    } while(!event->signal);  /* we might have been woken up and then the event cleared */

    event->signal = false;
done:
    pthread_mutex_unlock(&event->lock);
    return result;
}
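
/* Typical use of the event API (hypothetical sketch; g_event and the two
   functions are illustrative, not part of this module):

       static BKNI_EventHandle g_event;

       void producer_isr(void)
       {
           BKNI_SetEvent(g_event);
       }

       BERR_Code consumer_poll(void)
       {
           return BKNI_WaitForEvent(g_event, 100);
       }

   consumer_poll() returns BERR_SUCCESS once the producer has signalled, or
   BERR_TIMEOUT after ~100 msec. Per the code above, a nonzero timeout below
   10 msec is raised to 10 msec, and a timeout of 0 polls without blocking. */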

void
BKNI_SetEvent(BKNI_EventHandle event)
{
    int rc;
    BKNI_EventGroupHandle group;

    BDBG_OBJECT_ASSERT(event, BKNI_Event);
    group = event->group;
    /* At this point, we may have been removed from the group and event->group is NULL.
    This is a real possibility because BKNI_SetEvent can be called from an ISR.
    Caching the group pointer allows us to safely unlock still. */

    if (group) {
        rc = pthread_mutex_lock(&group->lock);
        BDBG_ASSERT(0 == rc);
    }
    rc = pthread_mutex_lock(&event->lock);
    if (rc!=0) {
        BDBG_ERR(("pthread_mutex_lock: %d", rc));
        BDBG_ASSERT(false);
    }
    event->signal = true;
    rc = pthread_cond_signal(&event->cond);
    if (rc!=0) {
        BDBG_ERR(("pthread_cond_signal: %d", rc));
        BDBG_ASSERT(false);
    }
    if (group) {
        rc = pthread_cond_signal(&group->cond);
        if (rc!=0) {
            BDBG_ERR(("pthread_cond_signal: %d, ignored", rc));
        }
    }
    rc = pthread_mutex_unlock(&event->lock);
    if (rc!=0) {
        BDBG_ERR(("pthread_mutex_unlock: %d", rc));
        BDBG_ASSERT(false);
    }
    if (group) {
        pthread_mutex_unlock(&group->lock);
    }
    return;
}

void
BKNI_ResetEvent(BKNI_EventHandle event)
{
    int rc;

    BDBG_OBJECT_ASSERT(event, BKNI_Event);
    rc = pthread_mutex_lock(&event->lock);
    if (rc!=0) {
        BDBG_ERR(("pthread_mutex_lock: %d", rc));
        BDBG_ASSERT(false);
    }
    event->signal = false;
    rc = pthread_mutex_unlock(&event->lock);
    if (rc!=0) {
        BDBG_ERR(("pthread_mutex_unlock: %d", rc));
        BDBG_ASSERT(false);
    }
    return;
}

int
BKNI_Vsnprintf(char *s, size_t n, const char *fmt, va_list ap)
{
    return vsnprintf(s, n, fmt, ap);
}

BERR_Code
BKNI_CreateEventGroup(BKNI_EventGroupHandle *pGroup)
{
    int rc;
    BKNI_EventGroupHandle group;
    BERR_Code result;
    /* coverity[var_decl: FALSE] */
    pthread_condattr_t attr;

    ASSERT_NOT_CRITICAL();

    group = malloc(sizeof(*group));
    if (!group) {
        result = BERR_TRACE(BERR_OUT_OF_SYSTEM_MEMORY);
        goto err_no_memory;
    }
    BDBG_OBJECT_SET(group, BKNI_EventGroup);

    BLST_D_INIT(&group->members);
    rc = pthread_mutex_init (&group->lock, NULL /* default attributes */);
    if (rc!=0) {
        result = BERR_TRACE(BERR_OS_ERROR);
        goto err_mutex;
    }

    /* coverity[uninit_use_in_call: FALSE] */
    rc = pthread_condattr_init(&attr);
    if (rc!=0) {
        result = BERR_TRACE(BERR_OS_ERROR);
        goto err_condvar;
    }

#if HAS_NPTL
    rc = pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
    if (rc!=0) {
        result = BERR_TRACE(BERR_OS_ERROR);
        goto err_condvar;
    }
#endif

    rc = pthread_cond_init( &group->cond, &attr);
    if (rc!=0) {
        result = BERR_TRACE(BERR_OS_ERROR);
        goto err_condvar;
    }
    *pGroup = group;

    return BERR_SUCCESS;

err_condvar:
    pthread_mutex_destroy(&group->lock);
err_mutex:
    BDBG_OBJECT_DESTROY(group, BKNI_EventGroup);
    free(group);
err_no_memory:
    return result;
}

void
BKNI_DestroyEventGroup(BKNI_EventGroupHandle group)
{
    int rc;
    BKNI_EventHandle event;

    ASSERT_NOT_CRITICAL();
    BDBG_OBJECT_ASSERT(group, BKNI_EventGroup);

    rc = pthread_mutex_lock(&group->lock);
    if (rc!=0) { /* pthread_mutex_lock reports errors as positive codes */
        BDBG_ERR(("pthread_mutex_lock failed, rc=%d", rc));
        BDBG_ASSERT(false);
    }

    while(NULL != (event=BLST_D_FIRST(&group->members)) ) {
        BDBG_ASSERT(event->group == group);
        event->group = NULL;
        BLST_D_REMOVE_HEAD(&group->members, list);
    }
    pthread_mutex_unlock(&group->lock);
    /* NOTE: to avoid this race condition, app must ensure that no SetEvent for this group is pending at this time */
    pthread_mutex_destroy(&group->lock);
    pthread_cond_destroy(&group->cond);
    BDBG_OBJECT_DESTROY(group, BKNI_EventGroup);
    free(group);
    return;
}
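
/* Per the race note above, a safe teardown order (illustrative; the first
   call stands for a hypothetical application hook) is to quiesce every
   source of BKNI_SetEvent, then detach each member event, then destroy:

       app_disable_interrupt_callbacks();
       BKNI_RemoveEventGroup(group, event);
       BKNI_DestroyEventGroup(group);
       BKNI_DestroyEvent(event);
*/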


BERR_Code
BKNI_AddEventGroup(BKNI_EventGroupHandle group, BKNI_EventHandle event)
{
    int rc;
    BERR_Code result = BERR_SUCCESS;

    ASSERT_NOT_CRITICAL();
    BDBG_OBJECT_ASSERT(group, BKNI_EventGroup);
    BDBG_OBJECT_ASSERT(event, BKNI_Event);

    /* IMPORTANT: group lock shall be acquired before event lock */
    rc = pthread_mutex_lock(&group->lock);
    if (rc!=0) {
        BDBG_ERR(("pthread_mutex_lock failed, rc=%d", rc));
        BDBG_ASSERT(false);
    }
    rc = pthread_mutex_lock(&event->lock);
    if (rc!=0) {
        BDBG_ERR(("pthread_mutex_lock failed, rc=%d", rc));
        BDBG_ASSERT(false);
    }
    if (event->group != NULL) {
        BDBG_ERR(("Event %#x already connected to the group %#x", (unsigned)(unsigned long)event, (unsigned)(unsigned long)group));
        result = BERR_TRACE(BERR_OS_ERROR);
    } else {
        BLST_D_INSERT_HEAD(&group->members, event, list);
        event->group = group;
        if (event->signal) {
            /* signal condition if signal already set */
            pthread_cond_signal(&group->cond);
        }
    }
    rc = pthread_mutex_unlock(&event->lock);
    if (rc!=0) {
        BDBG_ERR(("pthread_mutex_unlock failed, rc=%d", rc));
        BDBG_ASSERT(false);
    }
    rc = pthread_mutex_unlock(&group->lock);
    if (rc!=0) {
        BDBG_ERR(("pthread_mutex_unlock failed, rc=%d", rc));
        BDBG_ASSERT(false);
    }
    return result;
}

BERR_Code
BKNI_RemoveEventGroup(BKNI_EventGroupHandle group, BKNI_EventHandle event)
{
    int rc;
    BERR_Code result = BERR_SUCCESS;

    ASSERT_NOT_CRITICAL();
    BDBG_OBJECT_ASSERT(group, BKNI_EventGroup);
    BDBG_OBJECT_ASSERT(event, BKNI_Event);

    rc = pthread_mutex_lock(&group->lock);
    if (rc!=0) {
        BDBG_ERR(("pthread_mutex_lock failed, rc=%d", rc));
        BDBG_ASSERT(false);
    }
    rc = pthread_mutex_lock(&event->lock);
    if (rc!=0) {
        BDBG_ERR(("pthread_mutex_lock failed, rc=%d", rc));
        BDBG_ASSERT(false);
    }
    if (event->group != group) {
        BDBG_ERR(("Event %#x doesn't belong to the group %#x", (unsigned)(unsigned long)event, (unsigned)(unsigned long)group));
        result = BERR_TRACE(BERR_OS_ERROR);
    } else {
        BLST_D_REMOVE(&group->members, event, list);
        event->group = NULL;
    }
    rc = pthread_mutex_unlock(&event->lock);
    if (rc!=0) {
        BDBG_ERR(("pthread_mutex_unlock failed, rc=%d", rc));
        BDBG_ASSERT(false);
    }
    rc = pthread_mutex_unlock(&group->lock);
    if (rc!=0) {
        BDBG_ERR(("pthread_mutex_unlock failed, rc=%d", rc));
        BDBG_ASSERT(false);
    }
    return result;
}

static unsigned
group_get_events(BKNI_EventGroupHandle group, BKNI_EventHandle *events, unsigned max_events)
{
    BKNI_EventHandle ev;
    int rc;
    unsigned event;

    BDBG_OBJECT_ASSERT(group, BKNI_EventGroup);

    for(event=0, ev=BLST_D_FIRST(&group->members); ev && event<max_events ; ev=BLST_D_NEXT(ev, list)) {
        BDBG_OBJECT_ASSERT(ev, BKNI_Event);
        rc = pthread_mutex_lock(&ev->lock);
        if (rc!=0) {
            BDBG_ERR(("pthread_mutex_lock failed, rc=%d", rc));
            BDBG_ASSERT(false);
        }
        if (ev->signal) {
            ev->signal = false;
            events[event] = ev;
            event++;
        }
        rc = pthread_mutex_unlock(&ev->lock);
        if (rc!=0) {
            BDBG_ERR(("pthread_mutex_unlock failed, rc=%d", rc));
            BDBG_ASSERT(false);
        }
    }
    return event;
}

BERR_Code
BKNI_WaitForGroup(BKNI_EventGroupHandle group, int timeoutMsec, BKNI_EventHandle *events, unsigned max_events, unsigned *nevents)
{
    int rc;
    struct timespec target;
    BERR_Code result = BERR_SUCCESS;

    ASSERT_NOT_CRITICAL();
    BDBG_OBJECT_ASSERT(group, BKNI_EventGroup);

    if (max_events<1) {
        return BERR_TRACE(BERR_INVALID_PARAMETER);
    }
    if (timeoutMsec!=0 && timeoutMsec!=BKNI_INFINITE) {
        if (timeoutMsec<0) {
            /* If your app is written to allow negative values to this function, then it's highly likely you would allow -1, which would
            result in an infinite hang. We recommend that you only pass positive values to this function unless you definitely mean BKNI_INFINITE. */
            BDBG_WRN(("BKNI_WaitForGroup given negative timeout. Possible infinite hang if timeout happens to be -1 (BKNI_INFINITE)."));
        }
        if (timeoutMsec<10) {
            timeoutMsec=10; /* wait at least 10 msec */
        }
        rc = BKNI_P_SetTargetTime(&target, timeoutMsec);
        if (rc) {
            return BERR_TRACE(BERR_OS_ERROR);
        }
    }
    rc = pthread_mutex_lock(&group->lock);
    if (rc!=0) {
        BDBG_ERR(("pthread_mutex_lock failed, rc=%d", rc));
        BDBG_ASSERT(false);
    }
    for(;;) {
        *nevents = group_get_events(group, events, max_events);
        if (*nevents) {
            goto done;
        }
        if (timeoutMsec == 0) {
            result = BERR_TIMEOUT;
            goto done;
        }
        else if (timeoutMsec == BKNI_INFINITE) {
            rc = pthread_cond_wait(&group->cond, &group->lock);
        }
        else {
            rc = pthread_cond_timedwait(&group->cond, &group->lock, &target);
            if (rc==ETIMEDOUT) {
                BDBG_MSG(("BKNI_WaitForGroup(%#x): timeout", (unsigned)(unsigned long)group));
                result = BERR_TIMEOUT;
                goto done;
            }
        }
        if(rc==EINTR) {
            BDBG_MSG(("BKNI_WaitForGroup(%#x): interrupted", (unsigned)(unsigned long)group));
            continue;
        }
        if (rc!=0) {
            BDBG_ERR(("%s() returned %d",(timeoutMsec == BKNI_INFINITE) ? "pthread_cond_wait":"pthread_cond_timedwait",rc));
            result = BERR_TRACE(BERR_OS_ERROR);
            goto done;
        }
    }

done:
    rc = pthread_mutex_unlock(&group->lock);
    if (rc!=0) {
        BDBG_ERR(("pthread_mutex_unlock failed, rc=%d", rc));
        BDBG_ASSERT(false);
    }
    return result;
}
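
/* Putting the group API together (hypothetical sketch, error handling
   omitted):

       BKNI_EventGroupHandle group;
       BKNI_EventHandle ev[2], fired[2];
       unsigned n, i;

       BKNI_CreateEventGroup(&group);
       BKNI_CreateEvent(&ev[0]);
       BKNI_CreateEvent(&ev[1]);
       BKNI_AddEventGroup(group, ev[0]);
       BKNI_AddEventGroup(group, ev[1]);

       if (BKNI_WaitForGroup(group, 500, fired, 2, &n) == BERR_SUCCESS) {
           for (i = 0; i < n; i++) {
               handle(fired[i]);
           }
       }

   handle() is a placeholder; each entry of fired[] is a member event whose
   signal was consumed by group_get_events(). */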

void *
BKNI_Memset(void *b, int c, size_t len)
{
    return memset(b, c, len);
}

void *
BKNI_Memcpy(void *dst, const void *src, size_t len)
{
    return memcpy(dst, src, len);
}

int
BKNI_Memcmp(const void *b1, const void *b2, size_t len)
{
    return memcmp(b1, b2, len);
}

void *
BKNI_Memchr(const void *b, int c, size_t len)
{
    return memchr(b, c, len);
}

void *
BKNI_Memmove(void *dst, const void *src, size_t len)
{
    return memmove(dst, src, len);
}

void
BKNI_AssertIsrContext(const char *filename, unsigned lineno)
{
    if ( !CHECK_CRITICAL() ) {
        BDBG_P_AssertFailed("Not in critical section", filename, lineno);
    }
}

void BKNI_Uninit(void)
{
    BKNI_P_TrackAlloc_Uninit();
    return;
}