source: svn/trunk/newcon3bcm2_21bu/nexus/build/nfe_driver/b_bare_os.c

Last change on this file was 2, checked in by phkim, 11 years ago:
  phkim: revision copy newcon3sk r27

Property svn:executable set to *
File size: 30.4 KB
/*
 * Copyright (C) 2010-2011 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#include <linux/version.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)
#include <linux/autoconf.h>
#else
#include <generated/autoconf.h>
#endif
#include <linux/kernel.h> /* printk */
#include <linux/slab.h> /* memset and friends */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37)
#define DECLARE_MUTEX DEFINE_SEMAPHORE
#define init_MUTEX(sem)     sema_init(sem, 1)
#define init_MUTEX_LOCKED(sem)  sema_init(sem, 0)
#endif
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/signal.h>
#include <linux/semaphore.h>
#include <linux/sched.h>
#include <linux/freezer.h>
#include <linux/hardirq.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/kmod.h>
#include <linux/err.h> /* IS_ERR for checking kthread_run failure */
#include <asm/atomic.h>
#include <asm/uaccess.h>
#include <asm/brcmstb/brcmapi.h>
#include "b_bare_os.h"
#include "nexus_generic_driver.h"

/* cannot include magnum header files here */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31)
#error linux 2.6.31-3.2 or higher required
#endif

static int print_error(const char *file, int line, const char *test, int rc)
{
    if (rc) {
        printk("!!! BERR_TRACE %s = %d at %s:%d\n", test, rc, file, line);
    }
    return rc;
}

#define BERR_TRACE(rc) (print_error(__FILE__,__LINE__,#rc,rc))
#define BDBG_ERR(PARAMS) do{printk("### ");printk PARAMS;printk("\n");}while(0)
#define BDBG_WRN(PARAMS) do{printk("*** ");printk PARAMS;printk("\n");}while(0)
#define BDBG_MSG(PARAMS) /*do{printk("--- ");printk PARAMS;printk("\n");}while(0)*/
#define BDBG_MSG_IRQ(x)  /*BDBG_MSG(x)*/
#define BDBG_ASSERT(x) do {if (!(x)) {BDBG_ERR(("ASSERT %s failed at %s:%d", #x, __FILE__, __LINE__));b_bare_fail();}} while(0)
#define BERR_SUCCESS 0
#define BERR_OS_ERROR -1
#define BERR_TIMEOUT 1 /* >0 */
#define BSTD_UNUSED(x) ((x)=(x))
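
/*
 * The macros above are a local stand-in for the magnum BDBG/BERR debug API,
 * since magnum headers cannot be included in this file (see note above).
 * BDBG_ERR/BDBG_WRN print with "###"/"***" prefixes, BDBG_MSG and
 * BDBG_MSG_IRQ are compiled out, and BERR_TRACE prints the file/line for any
 * non-zero error code while passing the code through unchanged, e.g.
 * "return BERR_TRACE(BERR_OS_ERROR);".
 */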

/* NUM_L1_REGISTERS can be >= the number of actual L1 registers */
#define NUM_L1_REGISTERS 4
#define NUM_IRQS (32*NUM_L1_REGISTERS)
/* b_bare_os L1 interrupts are 0-based. linux is 1-based. */
#define LINUX_IRQ(i) (i+1)
#define NEXUS_IRQ(i) (i-1)

struct b_bare_interrupt_entry {
    const char *name;
    void (*handler)(void *,int);
    void *context_ptr;
    int context_int;
    b_bare_os_special_interrupt_handler special_handler;
    bool requested; /* request_irq called. must be deferred until first enable. */
    bool enabled; /* externally enabled by caller. must also check IntrMaskStatus if internally disabled for tasklet. */
    bool shared;
};

static struct b_bare_interrupt_state {
    spinlock_t lock;
    bool started;
    bool scheduled;
    struct {
        uint32_t IntrStatus;
        uint32_t IntrMaskStatus;
    } processing[NUM_L1_REGISTERS], pending[NUM_L1_REGISTERS];
    struct b_bare_interrupt_entry table[NUM_IRQS];
    struct work_struct task;
} b_bare_interrupt_state = {
    SPIN_LOCK_UNLOCKED,
};

static void b_bare_enter_critical_section(void);
static void b_bare_leave_critical_section(void);
static void b_bare_fail(void);
static void b_bare_lock_signals(sigset_t *pPrevMask);
static void b_bare_restore_signals(sigset_t *pPrevMask);
static int b_bare_signal_pending(void);
static void b_bare_disconnect_interrupt(unsigned irqNum);

static struct task_struct *g_csOwner;

#define SET_CRITICAL() do { g_csOwner = current; } while (0)
#define CLEAR_CRITICAL() do { g_csOwner = NULL; } while (0)

#define CHECK_CRITICAL() ( g_csOwner == current || in_interrupt() )

#define ASSERT_CRITICAL() do \
{\
    if ( !CHECK_CRITICAL() )\
    {\
        printk("Error, must be in critical section to call %s\n", __FUNCTION__);\
        b_bare_fail();\
    }\
} while (0)

#define ASSERT_NOT_CRITICAL() do \
{\
    if ( CHECK_CRITICAL() )\
    {\
        printk("Error, must not be in critical section to call %s\n", __FUNCTION__);\
        b_bare_fail();\
    }\
} while (0)
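
/*
 * Critical-section ownership is tracked per task: SET_CRITICAL() records the
 * current task in g_csOwner when b_bare_enter_critical_section() succeeds,
 * and CHECK_CRITICAL() treats either that task or any interrupt context as
 * "inside" the critical section. The ASSERT_CRITICAL()/ASSERT_NOT_CRITICAL()
 * macros turn a violated calling convention into an immediate b_bare_fail()
 * rather than a silent race.
 */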

static struct tasklet_struct *g_pIsrTasklet;
static struct tasklet_struct *g_pPendingTasklet;
static sigset_t g_blockedSignals;

struct b_bare_os_signal_tag {
    wait_queue_head_t wq;
    atomic_t eventset;
};

struct b_bare_os_lock_tag {
    struct semaphore sem;
};

int b_bare_os_init(void)
{
    struct b_bare_interrupt_state *state = &b_bare_interrupt_state;
    unsigned i;

    memset(&state->table, 0, sizeof(state->table));
    for(i=0;i<NUM_L1_REGISTERS;i++) {
        state->processing[i].IntrMaskStatus = ~0;
        state->processing[i].IntrStatus = 0;
        state->pending[i] = state->processing[i];
    }
    state->scheduled = false;
    state->started = true;
    printk("b_bare_os initialized\n");

    return 0;
}

void b_bare_os_uninit(void)
{
    struct b_bare_interrupt_state *state = &b_bare_interrupt_state;
    unsigned i;

    state->started = false;
    for(i=0;i<NUM_IRQS;i++) {
        if (state->table[i].handler) {
            printk("auto-b_bare_disconnect_interrupt %d\n", LINUX_IRQ(i));
            b_bare_disconnect_interrupt(i);
        }
    }
    printk("b_bare_os uninitialized\n");
}

/* coverity[+kill] */
static void b_bare_fail(void)
{
    volatile int i=0;
    volatile int j=0;
    printk("b_bare_fail: forcing oops\n");
    i = *(int *)i;
    i /= j;
    panic("b_bare_fail: panic...");
}

static void
b_bare_delay(unsigned microsec)
{
    udelay(microsec);
}

static void
b_bare_sleep(unsigned millisec)
{
    unsigned long ticks;
    long rc;
    sigset_t mask;
    int retval;

    ticks = (millisec * HZ) / 1000;

    /* Each tick is 1 or 10ms, so we must wait at least that long */
    if (ticks == 0) {
        ticks = 1;
    }

    /* Block all non-terminal signals while sleeping */
    b_bare_lock_signals(&mask);

    for(;;) {
        set_current_state(TASK_INTERRUPTIBLE);
        rc = schedule_timeout(ticks);
        if (rc==0) {
            retval = BERR_SUCCESS;
            break;
        }
        if (b_bare_signal_pending()) {
            retval = BERR_TRACE(BERR_OS_ERROR);
            break;
        }
        ticks = rc; /* keep sleeping */
    }

    /* Restore original signal mask */
    b_bare_restore_signals(&mask);

    return;
}

static b_bare_os_signal
b_bare_signal_create(void)
{
    b_bare_os_signal event;
    event = kmalloc(sizeof(*event), GFP_KERNEL);
    if (!event)
        return NULL;
    atomic_set(&event->eventset, 0);
    init_waitqueue_head(&event->wq);
    return event;
}

static void
b_bare_signal_destroy(b_bare_os_signal event)
{
    kfree(event);
}

static void
b_bare_signal_set(b_bare_os_signal event)
{
    atomic_set(&event->eventset, 1);
    wake_up_interruptible(&event->wq);
}

/**
* This will modify the caller's signal mask to block all signals
* except for the terminal signals listed in BKNI_Init().  This prevents
* user signals from interrupting magnum code, but they will be safely
* dispatched at the next opportunity.
**/
static void b_bare_lock_signals(sigset_t *pPrevMask)
{
    unsigned long flags;
    spinlock_t *pSignalLock;

    /* 2.6 signal lock */
    pSignalLock = &current->sighand->siglock;

    /* Lock the signal structs, should use spin_lock_irq since the same
     * spinlock could be acquired by the kernel code from within IRQ handler */
    spin_lock_irqsave(pSignalLock, flags);

    /* Save current signals */
    memcpy(pPrevMask, &current->blocked, sizeof(sigset_t));
    /* Set to block all but the terminal signals */
    memcpy(&current->blocked, &g_blockedSignals, sizeof(sigset_t));
    /* Must be called after manipulating blocked signals */
    recalc_sigpending();

    /* Release the lock */
    spin_unlock_irqrestore(pSignalLock, flags);
    return;
}

/**
* This will restore the original signal mask saved by b_bare_lock_signals()
**/
static void b_bare_restore_signals(sigset_t *pPrevMask)
{
    unsigned long flags;
    spinlock_t *pSignalLock;

    /* 2.6 signal lock */
    pSignalLock = &current->sighand->siglock;

    /* Lock the signal structs, should use spin_lock_irq since the same
     * spinlock could be acquired by the kernel code from within IRQ handler */
    spin_lock_irqsave(pSignalLock, flags);

    /* Restore signals */
    memcpy(&current->blocked, pPrevMask, sizeof(sigset_t));
    /* Must be called after manipulating blocked signals */
    recalc_sigpending();

    /* Release the lock */
    spin_unlock_irqrestore(pSignalLock, flags);
}

/**
* This will return 1 if a signal is pending.  Previous implementations
* of this function would dequeue all non-terminal signals to allow the
* caller to continue waiting.  That approach had the drawback of consuming
* any non-terminal signals and preventing the application from using them.
* The new implementation assumes that the caller has called b_bare_lock_signals()
* prior to waiting on an event or timeout and therefore can only be interrupted by
* a terminal signal.  When the original signal mask is restored via b_bare_restore_signals,
* the user signals will be dispatched again.
**/
static int b_bare_signal_pending(void)
{
    while (signal_pending(current)) {
        if ( ! try_to_freeze()) {       /* try_to_freeze returns 0 when no freeze request */
            return 1;                   /* signal pending (but no freeze request)         */
        }
    }
    return 0;                           /* no signal (or freeze request) pending          */
}
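
/*
 * Illustrative sketch only (not part of the driver): the three helpers above
 * are meant to be used together around any interruptible wait, as
 * b_bare_sleep() and b_bare_signal_wait() do:
 *
 *     sigset_t mask;
 *     b_bare_lock_signals(&mask);              // block non-terminal signals
 *     ... set_current_state()/schedule_timeout() ...
 *     if (b_bare_signal_pending()) {           // only terminal signals remain
 *         // treat as BERR_OS_ERROR and bail out
 *     }
 *     b_bare_restore_signals(&mask);           // re-deliver deferred signals
 */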

static int
b_bare_signal_wait(b_bare_os_signal event, unsigned timeoutMsec)
{
    int result = BERR_TIMEOUT;
    sigset_t mask;
    int prev_eventset;

    if ( timeoutMsec )
    {
        ASSERT_NOT_CRITICAL();
    }
    else if ( CHECK_CRITICAL() )
    {
        /* Don't mess with current or wait queues from an ISR */
        prev_eventset = atomic_xchg(&event->eventset, 0); /* this atomically clears 'eventset' and returns current (old) value */
        if ( prev_eventset)
        {
            return BERR_SUCCESS;
        }
        else
        {
            return BERR_TIMEOUT;
        }
    }

    /* This is used to achieve consistency between different OS's. */
    if (timeoutMsec>0 && timeoutMsec<30)
    {
        /* wait at least 30 msec */
        timeoutMsec = 30;
    }

    b_bare_lock_signals(&mask);

    if (b_bare_signal_pending())
        result = BERR_OS_ERROR;
    else
    {
        wait_queue_t wqe;
        unsigned long ticks;

        init_waitqueue_entry(&wqe, current);
        add_wait_queue(&event->wq, &wqe);

        if (timeoutMsec == (unsigned)-1)
            ticks = MAX_SCHEDULE_TIMEOUT;
        else if (timeoutMsec)
            ticks = (timeoutMsec * HZ) / 1000;
        else
            ticks = 0;

        /* Need to repeat the sleep until the entire timeout
        is consumed, or event occurs, or a true fatal signal is detected.
        It's possible to be signalled and yet keep going. */
        for ( ;; )
        {
            /* Be sure to go half asleep before checking condition. */
            /* Otherwise we have a race condition between when we   */
            /* check the condition and when we call schedule().     */
            set_current_state(TASK_INTERRUPTIBLE);

            prev_eventset = atomic_xchg(&event->eventset, 0); /* this atomically clears 'eventset' and returns current (old) value */
            if(prev_eventset)
            {
                result = BERR_SUCCESS;
                break;
            }
            else if (!ticks)
            {
                result = BERR_TIMEOUT;
                break;
            }
            else
            {
                /* When SetEvent is called, the process waiting on event->wq is woken. */
                ticks = schedule_timeout(ticks);
                if (b_bare_signal_pending())
                {
                    result = BERR_OS_ERROR;
                    break;
                }
            }
        }

        set_current_state(TASK_RUNNING);
        remove_wait_queue(&event->wq, &wqe);
    }

    b_bare_restore_signals(&mask);

    return result;
}
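
/*
 * Summary of b_bare_signal_wait() semantics as implemented above:
 *   timeoutMsec == 0            poll: BERR_SUCCESS if the event was already
 *                               set, BERR_TIMEOUT otherwise (also the only
 *                               mode allowed from the critical section/ISR).
 *   timeoutMsec == (unsigned)-1 wait forever (MAX_SCHEDULE_TIMEOUT).
 *   1..29 msec                  rounded up to 30 msec for cross-OS consistency.
 *   BERR_OS_ERROR               a terminal signal interrupted the wait.
 */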

static spinlock_t g_criticalSection = SPIN_LOCK_UNLOCKED;
static DECLARE_MUTEX(g_csMutex);

static void b_bare_enter_critical_section(void)
{
    ASSERT_NOT_CRITICAL();
    if ( g_pIsrTasklet )
    {
        down(&g_csMutex);
        tasklet_disable(g_pIsrTasklet);
    }
    else
    {
        spin_lock_bh(&g_criticalSection);
    }
    SET_CRITICAL();
}

static void b_bare_leave_critical_section(void)
{
    struct tasklet_struct *pIsrTasklet;

    ASSERT_CRITICAL();
    CLEAR_CRITICAL();

    /* Store tasklet and replace with any possible changes */
    pIsrTasklet = g_pIsrTasklet;
    g_pIsrTasklet = g_pPendingTasklet;

    /* Re-enable interrupts in the same way they were disabled */
    if ( pIsrTasklet )
    {
        tasklet_enable(pIsrTasklet);
        up(&g_csMutex);
    }
    else
    {
        spin_unlock_bh(&g_criticalSection);
    }
}
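
/*
 * Design note: once an ISR tasklet has been registered (g_pIsrTasklet), the
 * "critical section" is implemented as g_csMutex plus tasklet_disable(), so
 * the caller may hold it across sleeps while ISR dispatch is simply held off.
 * Before that point a plain spin_lock_bh() on g_criticalSection is used.
 * g_pPendingTasklet allows the registered tasklet to be swapped in at the
 * next b_bare_leave_critical_section().
 */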

static b_bare_os_lock
b_bare_lock_create(void)
{
    b_bare_os_lock lock;
    lock = kmalloc(sizeof(*lock), GFP_KERNEL);
    if (!lock) {
        return NULL;
    }
    init_MUTEX(&(lock)->sem);
    return lock;
}

static void
b_bare_lock_destroy(b_bare_os_lock lock)
{
    kfree(lock);
}

static struct b_bare_os_lock_tag isr_lock_impl;

b_bare_os_lock b_bare_get_interrupt_lock(void)
{
    return &isr_lock_impl;
}

static int
b_bare_lock_acquire(b_bare_os_lock lock)
{
    if (lock == &isr_lock_impl) {
        b_bare_enter_critical_section();
    }
    else {
        down(&lock->sem);
    }
    return BERR_SUCCESS;
}

static int
b_bare_lock_try_acquire(b_bare_os_lock lock)
{
    if (lock == &isr_lock_impl) {
        return BERR_OS_ERROR;
    }
    else if (down_trylock(&lock->sem)) {
        return BERR_TIMEOUT;
    }
    else {
        return BERR_SUCCESS;
    }
}

static void
b_bare_lock_release(b_bare_os_lock lock)
{
    if (lock == &isr_lock_impl) {
        b_bare_leave_critical_section();
    }
    else {
        up(&lock->sem);
    }
}

static void NEXUS_Platform_P_Isr(unsigned long data);
DECLARE_TASKLET(NEXUS_Platform_P_IsrTasklet, NEXUS_Platform_P_Isr, 0);

/* ISR handler, calls L1 interrupt handler */
static void __attribute__((no_instrument_function))
NEXUS_Platform_P_Isr(unsigned long data)
{
    struct b_bare_interrupt_state *state = &b_bare_interrupt_state;
    unsigned long flags;

    BSTD_UNUSED(data);

    if(!state->started) {
        goto done;
    }

    /* Loop until all is cleared */
    for(;;) {
        uint32_t re_enable[NUM_L1_REGISTERS];
        uint32_t status;
        unsigned bit;
        unsigned i;

        /* Mask interrupts only to read current status */
        spin_lock_irqsave(&state->lock, flags);

        for(status=0,i=0;i<NUM_L1_REGISTERS;i++) {
            /* swap pending and current interrupt, then clear pending and reenable interrupts */
            status |= state->pending[i].IntrStatus;
            /* coverity[use] */
            state->processing[i].IntrStatus = state->pending[i].IntrStatus;
            /* mask interrupts */
            state->processing[i].IntrStatus &= ~state->processing[i].IntrMaskStatus;

            /* Delay reenabling interrupts until after they have been serviced */
            /* disable_irq nests, so if the interrupt handler disables the interrupt */
            /* again, this is still safe. */
            re_enable[i] = state->processing[i].IntrStatus & state->pending[i].IntrStatus;

            /* clear list of delayed interrupts */
            state->pending[i].IntrStatus = 0;
        }

        /* Restore interrupts */
        spin_unlock_irqrestore(&state->lock, flags);

        if(status==0) {
            goto done;
        }


        /* then call L1 handlers inside critical section (KNI serializes with tasklet_disable so we do nothing here) */
        for(bit=0; bit<NUM_IRQS ; bit+=32) {

            status = state->processing[bit/32].IntrStatus;

            if(!status) {
                continue;
            }
            for(i=0;i<32;i++) {
                if(status & (1<<i)) {
                    state->table[i+bit].handler(state->table[i+bit].context_ptr, state->table[i+bit].context_int);
                }
            }
        }

        /* Now, restore any disabled interrupts (masking not required) */
        for(bit=0; bit<NUM_IRQS ; bit+=32) {
            status = re_enable[bit/32];
            if (!status)  {
                continue;
            }
            for(i=0;i<32;i++)
            {
                /* only enable interrupts which are not masked by the software */
                if (status & (1<<i))
                {
                    BDBG_MSG_IRQ(("BH enable[irq] %d", LINUX_IRQ(i+bit)));
                    if (!state->table[i+bit].special_handler)
                    {
                        enable_irq(LINUX_IRQ(i+bit));
                    }
                }
            }
        }
    }


done:
    /* Complete - indicate we're ready to run again */
    /* final unlock */
    /* Serialize with the TH so it will reschedule this tasklet if needed */
    spin_lock_irqsave(&state->lock, flags);
    state->scheduled = false;
    spin_unlock_irqrestore(&state->lock, flags);
    return;
}
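
/*
 * Interrupt dispatch is split in two:
 *   - NEXUS_Platform_P_LinuxIsr() (below) is the Linux top half. It disables
 *     the firing L1 line (or calls the registered special_handler), records
 *     the bit in state->pending[], and schedules the tasklet above at most
 *     once per activation via state->scheduled.
 *   - NEXUS_Platform_P_Isr() is the tasklet bottom half. It snapshots
 *     pending into processing under state->lock, applies IntrMaskStatus,
 *     calls each registered handler, and then re-enables the serviced L1
 *     lines.
 */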

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,30)
static irqreturn_t __attribute__((no_instrument_function))
NEXUS_Platform_P_LinuxIsr(int linux_irq, void *dev_id)
#else
static irqreturn_t __attribute__((no_instrument_function))
NEXUS_Platform_P_LinuxIsr(int linux_irq, void *dev_id, struct pt_regs *regs)
#endif
{
    struct b_bare_interrupt_entry *entry = dev_id;
    unsigned irq = NEXUS_IRQ(linux_irq);
    struct b_bare_interrupt_state *state = &b_bare_interrupt_state;
    unsigned i;
    unsigned long flags;

    if (irq >= NUM_IRQS) {
        goto error;
    }

    /* Make sure we're serialized with the tasklet across multiple CPUs */
    spin_lock_irqsave(&state->lock, flags);

    /* disable irq */
    BDBG_MSG_IRQ(("TH disable[irq] %d", linux_irq));
    if ( !entry->special_handler) {
        disable_irq_nosync(linux_irq);
    } else {
        entry->special_handler(irq);
    }

    for(i=0;i<NUM_L1_REGISTERS;i++,irq-=32) {
        if(irq<32) {
            state->pending[i].IntrStatus |= 1<<irq;
            break;
        }
    }

    if (state->started && !state->scheduled)
    {
        /* queue task only once per activation */
        state->scheduled = true;

        /* This needs to run as a high-priority tasklet, which will immediately follow */
        tasklet_hi_schedule(&NEXUS_Platform_P_IsrTasklet);
    }

    spin_unlock_irqrestore(&state->lock, flags);

    return IRQ_HANDLED;

error:
    BDBG_WRN(("unknown irq %d", linux_irq));
    disable_irq_nosync(linux_irq);
    return IRQ_HANDLED;
}

static int b_bare_connect_interrupt(const char *name, unsigned irqNum,
    void (*handler)(void *,int), void *context_ptr, int context_int,
    bool shared, b_bare_os_special_interrupt_handler special_handler)
{
    struct b_bare_interrupt_state *state = &b_bare_interrupt_state;
    struct b_bare_interrupt_entry *entry;
    unsigned long flags;

    if (irqNum>=NUM_IRQS || handler==NULL || !state->started) {
        return BERR_TRACE(-1);
    }
    entry = &state->table[irqNum];
    if (entry->handler) {
        /* can't overwrite old handler, b_bare_disconnect_interrupt shall be called first */
        return BERR_TRACE(-1);
    }

    spin_lock_irqsave(&state->lock, flags);
    entry->name = name;
    entry->handler = handler;
    entry->context_ptr = context_ptr;
    entry->context_int = context_int;
    entry->special_handler = special_handler;
    entry->shared = shared;
    BDBG_ASSERT(!entry->enabled);
    BDBG_ASSERT(!entry->requested);
    /* request_irq deferred to first enable. */
    spin_unlock_irqrestore(&state->lock, flags);

    return 0;
}

static int b_bare_enable_interrupt(unsigned irqNum)
{
    struct b_bare_interrupt_state *state = &b_bare_interrupt_state;
    struct b_bare_interrupt_entry *entry;
    unsigned reg = irqNum/32;
    unsigned long flags;

    if (irqNum>=NUM_IRQS || !state->started) {
        return BERR_TRACE(-1);
    }
    entry = &state->table[irqNum];
    if (!entry->handler) {
        return BERR_TRACE(-1);
    }

    spin_lock_irqsave(&state->lock, flags);
    state->processing[reg].IntrMaskStatus &= ~(1 << (irqNum%32));
    if (!entry->requested) {
        int irqflags;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,30)
        irqflags = 0; /* later versions of linux always enable interrupts on request_irq */
        if (entry->shared) {
            irqflags |= IRQF_SHARED;
        }
#else
        irqflags = SA_INTERRUPT;
        if (entry->shared) {
            irqflags |= SA_SHIRQ;
        }
#endif

        spin_unlock_irqrestore(&state->lock, flags);
        BDBG_MSG(("connect interrupt %s %d (%d, %p)", entry->name, LINUX_IRQ(irqNum), entry->shared, entry->special_handler));
        if (request_irq(LINUX_IRQ(irqNum), NEXUS_Platform_P_LinuxIsr, irqflags, entry->name, entry)) {
            /* disable */
            spin_lock_irqsave(&state->lock, flags);
            entry->handler = NULL;
            state->processing[reg].IntrMaskStatus |= (1 << (irqNum%32));
            spin_unlock_irqrestore(&state->lock, flags);
            return BERR_TRACE(-1);
        }
        entry->requested = true;
        entry->enabled = true;
        return 0;
    }
    else if (!entry->enabled) {
        BDBG_MSG(("enable interrupt %d", LINUX_IRQ(irqNum)));
        if (!entry->special_handler) {
            enable_irq(LINUX_IRQ(irqNum));
        }
        entry->enabled = true;
    }
    spin_unlock_irqrestore(&state->lock, flags);
    return 0;
}

static void b_bare_disable_interrupt(unsigned irqNum)
{
    struct b_bare_interrupt_state *state = &b_bare_interrupt_state;
    struct b_bare_interrupt_entry *entry;
    unsigned reg = irqNum/32;
    unsigned long flags;

    if (irqNum>=NUM_IRQS) {
        BERR_TRACE(-1);
        return;
    }
    entry = &state->table[irqNum];
    if (!entry->handler) {
        BERR_TRACE(-1);
        return;
    }

    if (entry->enabled) {
        BDBG_ASSERT(entry->requested);
        BDBG_MSG(("disable interrupt %d", LINUX_IRQ(irqNum)));
        spin_lock_irqsave(&state->lock, flags);
        state->processing[reg].IntrMaskStatus |= (1 << (irqNum%32));
        if (!entry->special_handler) {
            /* If the TH has received the interrupt but it has not been processed by the tasklet, don't nest the disable call. */
            if ( 0 == (state->pending[reg].IntrStatus & (1 << (irqNum%32))) )
            {
                disable_irq_nosync(LINUX_IRQ(irqNum));
            }
        }
        entry->enabled = false;
        spin_unlock_irqrestore(&state->lock, flags);
    }
}

static void b_bare_disconnect_interrupt(unsigned irqNum)
{
    struct b_bare_interrupt_state *state = &b_bare_interrupt_state;
    struct b_bare_interrupt_entry *entry;
    unsigned reg = irqNum/32;
    unsigned long flags;

    if(irqNum>=NUM_IRQS) {
        BERR_TRACE(-1);
        return;
    }
    entry = &state->table[irqNum];
    if (!entry->handler) {
        BERR_TRACE(-1);
        return;
    }

    spin_lock_irqsave(&state->lock, flags);
    BDBG_MSG(("disconnect interrupt %d", LINUX_IRQ(irqNum)));
    entry->handler = NULL;
    if (entry->enabled) {
        state->processing[reg].IntrMaskStatus |= (1 << (irqNum%32));
        entry->enabled = false;
    }
    if (entry->requested) {
        free_irq(LINUX_IRQ(irqNum), entry);
        entry->requested = false;
    }
    spin_unlock_irqrestore(&state->lock, flags);
}
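
/*
 * Interrupt lifecycle, as implemented above (illustrative pseudo-sequence;
 * the handler name, context and irq number are hypothetical):
 *
 *     b_bare_connect_interrupt("xyz_l1", irq, xyz_isr, ctx, 0, false, NULL);
 *     b_bare_enable_interrupt(irq);     // first enable performs request_irq()
 *     ...
 *     b_bare_disable_interrupt(irq);    // masks and disables the L1 line
 *     b_bare_disconnect_interrupt(irq); // free_irq() and clears the table slot
 *
 * b_bare_os_uninit() walks the table and disconnects anything still
 * registered.
 */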

static void *
b_bare_mmap(bool cached, unsigned offset, unsigned length)
{
    void *addr;

    /* this assumes linux 2.6.31-3.2 or later for full mmap support through kernel's ioremap */
    if (cached)
        addr = ioremap_cachable(offset, length);
    else
        addr = ioremap_nocache(offset, length);
    if (!addr) {
        BDBG_ERR(("ioremap(%#x, %#x, %s) failed", offset, length, cached?"cached":"uncached"));
    }
    return addr;
}

static void
b_bare_munmap(void *addr, size_t size)
{
    BSTD_UNUSED(size);
    iounmap(addr);
}

static void
b_bare_atomic_update(unsigned reg, uint32_t mask, uint32_t value)
{
    uint32_t temp;
    unsigned long flags;

    /* this spinlock synchronizes with any kernel use of a set of shared registers */
    spin_lock_irqsave(&brcm_magnum_spinlock, flags);

    /* read/modify/write by calling back into firmware */
    nexus_generic_driver_read_register(reg, &temp);
    temp &= ~mask;
    temp |= value;
    nexus_generic_driver_write_register(reg, temp);

    spin_unlock_irqrestore(&brcm_magnum_spinlock, flags);
}

static void *
b_bare_malloc(size_t size)
{
    return kmalloc(size, GFP_KERNEL);
}

static void
b_bare_free(void *ptr)
{
    kfree(ptr);
    return;
}

static void
b_bare_print_debug(bool high_priority, const char *str)
{
    BSTD_UNUSED(high_priority);
    printk("%s", str);
    return;
}

/* the unit of b_bare_os_tick is milliseconds */
static b_bare_os_tick
b_bare_current_tick(void)
{
    return jiffies_to_msecs(jiffies);
}

static unsigned
b_bare_tick_diff(b_bare_os_tick future, b_bare_os_tick past)
{
    return future - past;
}
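
/*
 * Note on b_bare_tick_diff(): ticks are millisecond values derived from
 * jiffies, so (assuming b_bare_os_tick is an unsigned type, as declared in
 * b_bare_os.h) the unsigned subtraction still yields the elapsed time after
 * the tick counter wraps, as long as the real interval fits in the type's
 * range.
 */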

struct b_bare_os_thread_tag {
    char name[16];
    void (*pThreadFunc)(void *);
    void *pContext;
    b_bare_os_thread_settings settings;
    struct task_struct *task;
};

static int b_bare_thread_start(void *data)
{
    b_bare_os_thread thread = data;
    thread->pThreadFunc(thread->pContext);
    /* do not return until stopped */
    while (1) {
        set_current_state(TASK_INTERRUPTIBLE); /* go half asleep before checking condition */
        if (kthread_should_stop()) break;
        schedule();
    }

    return 0;
}

static b_bare_os_thread
b_bare_thread_create(const char *pThreadName, void (*callback)(void *), void *pContext, b_bare_os_thread_settings *pSettings)
{
    b_bare_os_thread thread;
    int mrc;

    BDBG_ASSERT(pThreadName);
    BDBG_ASSERT(callback);

    thread = kmalloc(sizeof(*thread), GFP_KERNEL);
    if(!thread) {
        mrc = BERR_TRACE(BERR_OS_ERROR);
        goto err_alloc;
    }
    strncpy(thread->name, pThreadName,sizeof(thread->name)-1);
    thread->name[sizeof(thread->name)-1]='\0';
    thread->pThreadFunc = callback;
    thread->pContext = pContext;
    thread->settings = *pSettings;

    /* in the linux kernel the stack size is fixed at two 4K pages */
#define LINUX_KERNEL_STACK_SIZE (8*1024)
    if (thread->settings.stack_size < LINUX_KERNEL_STACK_SIZE) {
        BDBG_WRN(("b_bare_thread_create: %s stack size %u forced to %u", thread->name, thread->settings.stack_size, LINUX_KERNEL_STACK_SIZE));
    }
    thread->settings.stack_size = LINUX_KERNEL_STACK_SIZE;

    thread->task = kthread_run(b_bare_thread_start, thread, thread->name);
    if (IS_ERR(thread->task)) { /* kthread_run returns ERR_PTR() on failure, not NULL */
        kfree(thread);
        mrc = BERR_TRACE(BERR_OS_ERROR);
        return NULL;
    }

    return thread;

err_alloc:
    return NULL;
}

static void
b_bare_thread_destroy(b_bare_os_thread thread)
{
    kthread_stop(thread->task);
    kfree(thread);
    return;
}

static void
b_bare_dcache_flush(void *pvAddr, size_t ulNumBytes)
{
    if (ulNumBytes) {
        if((unsigned long)pvAddr >= 0x80000000) {
            dma_cache_wback_inv((unsigned long)pvAddr, ulNumBytes);
        } else {
            BDBG_MSG(("flushing fake address %#lx", (unsigned long)pvAddr));
        }
    }
    return;
}

static struct {
    unsigned count; /* provides short-circuit to full array search */
    struct {
        const char *key,*value;
    } env[64];
} b_bare_env = {
    0
    /* *** */
};

static const char *
b_bare_getenv(const char *name)
{
    unsigned i;
    for(i=0;i<b_bare_env.count;i++) {
        if (b_bare_env.env[i].key && strcmp(b_bare_env.env[i].key, name)==0) {
            return b_bare_env.env[i].value;
        }
    }
    return NULL;
}

static void
b_bare_setenv(const char *name, const char *value)
{
    unsigned i;
    unsigned freeslot = b_bare_env.count;

    for(i=0;i<b_bare_env.count;i++) {
        if (!b_bare_env.env[i].key && freeslot == b_bare_env.count) {
            freeslot = i;
        }

        if (b_bare_env.env[i].key && strcmp(b_bare_env.env[i].key, name)==0) {
            b_bare_env.env[i].value = value;
            if (!value) {
                /* if we're unsetting, free the slot but don't reduce the count */
                b_bare_env.env[i].key = NULL;
            }
            return;
        }
    }
    if (!value) return;

    /* save new key/value pair */
    if (freeslot<sizeof(b_bare_env.env)/sizeof(*b_bare_env.env)) {
        b_bare_env.env[freeslot].key = name;
        b_bare_env.env[freeslot].value = value;
        if (freeslot == b_bare_env.count) {
            b_bare_env.count++;
        }
    }
    else {
        BDBG_WRN(("Unable to store setenv(%s,%s)", name, value));
    }
    return;
}
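
/*
 * Illustrative only: b_bare_setenv()/b_bare_getenv() store the caller's
 * pointers rather than copying the strings, so both key and value must stay
 * valid for as long as the entry may be looked up (string literals or other
 * static storage), e.g.
 *
 *     b_bare_setenv("profile", "1");          // hypothetical key/value
 *     if (b_bare_getenv("profile")) { ... }
 *     b_bare_setenv("profile", NULL);         // unset; the slot is reused later
 */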

static int b_bare_copy_from_process(void *dest, const void *src, unsigned size)
{
    return (int)copy_from_user(dest, src, size);
}

static int b_bare_copy_to_process(void *dest, const void *src, unsigned size)
{
    return (int)copy_to_user(dest, src, size);
}

static void b_bare_terminate_process(unsigned id)
{
    int rc;
    char pidstr[16];
    char *argv[] = {"/bin/kill", "-9", pidstr, NULL};
    snprintf(pidstr, 16, "%u", id);
    rc = call_usermodehelper("/bin/kill", argv, NULL, UMH_WAIT_PROC);
    if (rc) BERR_TRACE(rc);
}

b_bare_os_interface b_bare_os = {
    b_bare_malloc,
    b_bare_free,
    b_bare_print_debug,
    b_bare_current_tick,
    b_bare_tick_diff,
    b_bare_mmap,
    b_bare_munmap,
    b_bare_dcache_flush,
    b_bare_delay,
    b_bare_sleep,
    b_bare_lock_create,
    b_bare_lock_acquire,
    b_bare_lock_try_acquire,
    b_bare_lock_release,
    b_bare_lock_destroy,
    b_bare_signal_create,
    b_bare_signal_wait,
    b_bare_signal_set,
    b_bare_signal_destroy,
    b_bare_copy_from_process,
    b_bare_copy_to_process,
    b_bare_thread_create,
    b_bare_thread_destroy,
    b_bare_getenv,
    b_bare_setenv,
    b_bare_terminate_process,
    b_bare_atomic_update,
    b_bare_connect_interrupt,
    b_bare_enable_interrupt,
    b_bare_disable_interrupt,
    b_bare_disconnect_interrupt,
    b_bare_get_interrupt_lock,
    -1
};
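
/*
 * Illustrative only: consumers reach these services through the
 * b_bare_os_interface table (via b_get_bare_os() / pb_bare_os below) rather
 * than by calling the static functions directly. The exact member names are
 * defined in b_bare_os.h; assuming conventional naming, usage looks roughly
 * like:
 *
 *     b_bare_os_interface *os = b_get_bare_os();
 *     void *buf = os->malloc(128);   // hypothetical member names
 *     os->sleep(10);
 *     os->free(buf);
 */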


/* The following pointer and accessor function are only used on the driver side. */
/* On the firmware side the pointer is located in kni/base/bkni.c and the accessor is located in stubs_rev.S */
/* as this file is not linked with the firmware. */
b_bare_os_interface *pb_bare_os = &b_bare_os;

b_bare_os_interface *b_get_bare_os(void)
{
    return pb_bare_os;
}