source: svn/trunk/newcon3bcm2_21bu/toolchain/mips-linux-uclibc/include/linux/interrupt.h @ 2

Last change on this file since 2 was 2, checked in by jglee, 11 years ago

first commit

  • Property svn:executable set to *
File size: 7.8 KB
/* interrupt.h */
#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H

#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/preempt.h>
#include <asm/hardirq.h>
#include <asm/ptrace.h>
#include <asm/system.h>

/*
 * For 2.4.x compatibility, 2.4.x code can use
 *
 *      typedef void irqreturn_t;
 *      #define IRQ_NONE
 *      #define IRQ_HANDLED
 *      #define IRQ_RETVAL(x)
 *
 * to mix old-style and new-style irq handler returns.
 *
 * IRQ_NONE means we didn't handle it.
 * IRQ_HANDLED means we did have a valid interrupt and handled it.
 * IRQ_RETVAL(x) selects between the two: non-zero x means handled.
 */
typedef int irqreturn_t;

#define IRQ_NONE        (0)
#define IRQ_HANDLED     (1)
#define IRQ_RETVAL(x)   ((x) != 0)

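/*
 * Illustrative sketch, not part of the original header: a typical
 * handler built on IRQ_RETVAL().  The device structure, its status
 * helpers and the MY_DEV_IRQ_PENDING bit are hypothetical names.
 */
#if 0
static irqreturn_t my_handler(int irq, void *dev_id, struct pt_regs *regs)
{
        struct my_dev *dev = dev_id;
        unsigned int pending = my_dev_read_status(dev) & MY_DEV_IRQ_PENDING;

        if (pending)
                my_dev_ack_irq(dev);    /* acknowledge before returning */

        /* non-zero pending -> IRQ_HANDLED, zero -> IRQ_NONE */
        return IRQ_RETVAL(pending);
}
#endif
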
struct irqaction {
        irqreturn_t (*handler)(int, void *, struct pt_regs *);
        unsigned long flags;
        cpumask_t mask;
        const char *name;
        void *dev_id;
        struct irqaction *next;
        int irq;
        struct proc_dir_entry *dir;
};

extern irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs);
extern int request_irq(unsigned int,
                       irqreturn_t (*handler)(int, void *, struct pt_regs *),
                       unsigned long, const char *, void *);
extern void free_irq(unsigned int, void *);

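/*
 * Illustrative sketch, not part of the original header: registering
 * and releasing a handler like the one above.  SA_SHIRQ is this era's
 * shared-IRQ flag; the device pointer doubles as dev_id so free_irq()
 * can find the right action.  The names are hypothetical.
 */
#if 0
static int my_dev_attach(struct my_dev *dev, unsigned int irq)
{
        int err;

        err = request_irq(irq, my_handler, SA_SHIRQ, "my_dev", dev);
        if (err)
                return err;     /* e.g. -EBUSY if the line is taken */
        dev->irq = irq;
        return 0;
}

static void my_dev_detach(struct my_dev *dev)
{
        free_irq(dev->irq, dev);        /* dev_id must match request_irq() */
}
#endif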

#ifdef CONFIG_GENERIC_HARDIRQS
extern void disable_irq_nosync(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);
#endif

/*
 * Temporary defines for UP kernels, until all code gets fixed.
 */
#ifndef CONFIG_SMP
static inline void __deprecated cli(void)
{
        local_irq_disable();
}
static inline void __deprecated sti(void)
{
        local_irq_enable();
}
static inline void __deprecated save_flags(unsigned long *x)
{
        local_save_flags(*x);
}
#define save_flags(x) save_flags(&x)
static inline void __deprecated restore_flags(unsigned long x)
{
        local_irq_restore(x);
}

static inline void __deprecated save_and_cli(unsigned long *x)
{
        local_irq_save(*x);
}
#define save_and_cli(x) save_and_cli(&x)
#endif /* CONFIG_SMP */
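
/*
 * Illustrative sketch, not part of the original header: the
 * replacement pattern for the deprecated helpers above.  New code
 * should use the local_irq_*() family directly instead of
 * cli()/sti()/save_flags()/restore_flags().  The device is
 * hypothetical.
 */
#if 0
static void my_dev_touch_state(struct my_dev *dev)
{
        unsigned long flags;

        local_irq_save(flags);          /* was: save_and_cli(flags) */
        dev->counter++;                 /* brief IRQ-off critical section */
        local_irq_restore(flags);       /* was: restore_flags(flags) */
}
#endif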

/* SoftIRQ primitives.  */
#define local_bh_disable() \
                do { add_preempt_count(SOFTIRQ_OFFSET); barrier(); } while (0)
#define __local_bh_enable() \
                do { barrier(); sub_preempt_count(SOFTIRQ_OFFSET); } while (0)

extern void local_bh_enable(void);

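/*
 * Illustrative sketch, not part of the original header: protecting
 * process-context access to data that is also touched from softirq
 * (BH) context.  The statistics structure is hypothetical.
 */
#if 0
static void my_dev_update_stats(struct my_dev *dev, unsigned long n)
{
        local_bh_disable();             /* keep softirqs off this CPU */
        dev->stats.packets += n;        /* also updated from softirq context */
        local_bh_enable();              /* may run pending softirqs */
}
#endif
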
/* PLEASE avoid allocating new softirqs unless you really need
   high-frequency threaded job scheduling.  For almost all purposes
   tasklets are more than enough; e.g. all serial device BHs et
   al. should be converted to tasklets, not softirqs.
 */

enum
{
        HI_SOFTIRQ=0,
        TIMER_SOFTIRQ,
        NET_TX_SOFTIRQ,
        NET_RX_SOFTIRQ,
        SCSI_SOFTIRQ,
        TASKLET_SOFTIRQ
};

/* softirq mask and active fields moved to irq_cpustat_t in
 * asm/hardirq.h to get better cache usage.  KAO
 */

struct softirq_action
{
        void    (*action)(struct softirq_action *);
        void    *data;
};

asmlinkage void do_softirq(void);
extern void open_softirq(int nr, void (*action)(struct softirq_action*), void *data);
extern void softirq_init(void);
#define __raise_softirq_irqoff(nr) do { local_softirq_pending() |= 1UL << (nr); } while (0)
extern void FASTCALL(raise_softirq_irqoff(unsigned int nr));
extern void FASTCALL(raise_softirq(unsigned int nr));

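/*
 * Illustrative sketch, not part of the original header: how a softirq
 * slot is wired up and raised.  A real user would add MY_SOFTIRQ to
 * the enum above; here it and the queue helpers are hypothetical
 * (and, per the comment above, a tasklet is almost always the better
 * choice anyway).
 */
#if 0
static void my_softirq_action(struct softirq_action *a)
{
        struct my_queue *q = a->data;   /* the cookie from open_softirq() */

        my_queue_drain(q);
}

static void my_softirq_setup(struct my_queue *q)
{
        open_softirq(MY_SOFTIRQ, my_softirq_action, q);
}

static void my_irq_part(void)
{
        raise_softirq(MY_SOFTIRQ);      /* mark it pending, run it soon */
}
#endif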

/* Tasklets --- multithreaded analogue of BHs.

   The main feature distinguishing them from generic softirqs: a given
   tasklet runs on only one CPU at a time.

   The main feature distinguishing them from BHs: different tasklets
   may run simultaneously on different CPUs.

   Properties:
   * If tasklet_schedule() is called, the tasklet is guaranteed
     to be executed on some cpu at least once afterwards.
   * If the tasklet is already scheduled but its execution has not yet
     started, it will be executed only once.
   * If this tasklet is already running on another CPU (or schedule is called
     from the tasklet itself), it is rescheduled for later.
   * A tasklet is strictly serialized with respect to itself, but not
     with respect to other tasklets.  If a client needs inter-tasklet
     synchronization, it must provide it with spinlocks.
 */

struct tasklet_struct
{
        struct tasklet_struct *next;
        unsigned long state;
        atomic_t count;
        void (*func)(unsigned long);
        unsigned long data;
};

#define DECLARE_TASKLET(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }

#define DECLARE_TASKLET_DISABLED(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }

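/*
 * Illustrative sketch, not part of the original header: a statically
 * declared tasklet and the interrupt handler that schedules it.  The
 * function, state variable and helpers are hypothetical.
 */
#if 0
static void my_tasklet_fn(unsigned long data)
{
        struct my_dev *dev = (struct my_dev *)data;

        my_dev_process_events(dev);     /* deferred work, softirq context */
}

DECLARE_TASKLET(my_tasklet, my_tasklet_fn, (unsigned long)&my_dev_state);

static irqreturn_t my_irq(int irq, void *dev_id, struct pt_regs *regs)
{
        tasklet_schedule(&my_tasklet);  /* no-op if already scheduled */
        return IRQ_HANDLED;
}
#endif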

enum
{
        TASKLET_STATE_SCHED,    /* Tasklet is scheduled for execution */
        TASKLET_STATE_RUN       /* Tasklet is running (SMP only) */
};

#ifdef CONFIG_SMP
static inline int tasklet_trylock(struct tasklet_struct *t)
{
        return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock(struct tasklet_struct *t)
{
        smp_mb__before_clear_bit();
        clear_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock_wait(struct tasklet_struct *t)
{
        while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
}
#else
#define tasklet_trylock(t) 1
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif

extern void FASTCALL(__tasklet_schedule(struct tasklet_struct *t));

static inline void tasklet_schedule(struct tasklet_struct *t)
{
        if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
                __tasklet_schedule(t);
}

extern void FASTCALL(__tasklet_hi_schedule(struct tasklet_struct *t));

static inline void tasklet_hi_schedule(struct tasklet_struct *t)
{
        if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
                __tasklet_hi_schedule(t);
}


static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
        atomic_inc(&t->count);
        smp_mb__after_atomic_inc();
}

static inline void tasklet_disable(struct tasklet_struct *t)
{
        tasklet_disable_nosync(t);
        tasklet_unlock_wait(t);
        smp_mb();
}

static inline void tasklet_enable(struct tasklet_struct *t)
{
        smp_mb__before_atomic_dec();
        atomic_dec(&t->count);
}

static inline void tasklet_hi_enable(struct tasklet_struct *t)
{
        smp_mb__before_atomic_dec();
        atomic_dec(&t->count);
}

extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
                         void (*func)(unsigned long), unsigned long data);

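/*
 * Illustrative sketch, not part of the original header: the dynamic
 * tasklet lifecycle.  tasklet_disable() waits for a running instance
 * to finish; tasklet_kill() must be called before the tasklet's data
 * goes away.  The device structure and its tlet field are
 * hypothetical.
 */
#if 0
static void my_dev_start(struct my_dev *dev)
{
        tasklet_init(&dev->tlet, my_tasklet_fn, (unsigned long)dev);
}

static void my_dev_reconfigure(struct my_dev *dev)
{
        tasklet_disable(&dev->tlet);    /* blocks until not running */
        my_dev_rewrite_config(dev);
        tasklet_enable(&dev->tlet);
}

static void my_dev_stop(struct my_dev *dev)
{
        tasklet_kill(&dev->tlet);       /* wait out a scheduled run */
}
#endif
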
/*
 * Autoprobing for irqs:
 *
 * probe_irq_on() and probe_irq_off() provide robust primitives
 * for accurate IRQ probing during kernel initialization.  They are
 * reasonably simple to use, are not "fooled" by spurious interrupts,
 * and, unlike other attempts at IRQ probing, they do not get hung on
 * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
 *
 * For reasonably foolproof probing, use them as follows:
 *
 * 1. clear and/or mask the device's internal interrupt.
 * 2. sti();
 * 3. irqs = probe_irq_on();      // "take over" all unassigned idle IRQs
 * 4. enable the device and cause it to trigger an interrupt.
 * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
 * 6. irq = probe_irq_off(irqs);  // get IRQ number, 0=none, negative=multiple
 * 7. service the device to clear its pending interrupt.
 * 8. loop again if paranoia is required.
 *
 * probe_irq_on() returns a mask of allocated irqs.
 *
 * probe_irq_off() takes the mask as a parameter,
 * and returns the irq number which occurred,
 * or zero if none occurred, or a negative irq number
 * if more than one irq occurred.
 */
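
/*
 * Illustrative sketch, not part of the original header: the probing
 * recipe above expressed as code.  The device helpers are
 * hypothetical, and mdelay() is assumed from <linux/delay.h>.
 */
#if 0
static int my_dev_find_irq(struct my_dev *dev)
{
        unsigned long mask;
        int irq;

        my_dev_mask_irq(dev);           /* step 1: quiesce the device */
        mask = probe_irq_on();          /* step 3: grab idle IRQ lines */
        my_dev_trigger_irq(dev);        /* step 4: make it interrupt */
        mdelay(10);                     /* step 5: give it time to fire */
        irq = probe_irq_off(mask);      /* step 6: 0 = none, <0 = several */
        my_dev_ack_irq(dev);            /* step 7: clear pending state */

        return irq;                     /* caller may loop for paranoia */
}
#endif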

#if defined(CONFIG_GENERIC_HARDIRQS) && !defined(CONFIG_GENERIC_IRQ_PROBE)
static inline unsigned long probe_irq_on(void)
{
        return 0;
}
static inline int probe_irq_off(unsigned long val)
{
        return 0;
}
static inline unsigned int probe_irq_mask(unsigned long val)
{
        return 0;
}
#else
extern unsigned long probe_irq_on(void);        /* returns 0 on failure */
extern int probe_irq_off(unsigned long);        /* returns 0 or negative on failure */
extern unsigned int probe_irq_mask(unsigned long);      /* returns mask of ISA interrupts */
#endif

#endif