source: svn/trunk/newcon3bcm2_21bu/toolchain/mips-linux-uclibc/include/linux/seqlock.h @ 2

Last change on this file since 2 was 2, checked in by phkim, 11 years ago

1.phkim

  1. revision copy newcon3sk r27
  • Property svn:executable set to *
File size: 4.4 KB
#ifndef __LINUX_SEQLOCK_H
#define __LINUX_SEQLOCK_H
/*
 * Reader/writer consistent mechanism without starving writers. This type of
 * lock is for data where the reader wants a consistent set of information
 * and is willing to retry if the information changes.  Readers never
 * block but they may have to retry if a writer is in
 * progress. Writers do not wait for readers.
 *
 * This is not as cache friendly as brlock. Also, this will not work
 * for data that contains pointers, because any writer could
 * invalidate a pointer that a reader was following.
 *
 * Expected reader usage:
 *      do {
 *          seq = read_seqbegin(&foo);
 *      ...
 *      } while (read_seqretry(&foo, seq));
 *
 *
 * On non-SMP the spin locks disappear but the writer still needs
 * to increment the sequence variables because an interrupt routine could
 * change the state of the data.
 *
 * Based on x86_64 vsyscall gettimeofday
 * by Keith Owens and Andrea Arcangeli
 */

#include <linux/spinlock.h>
#include <linux/preempt.h>

typedef struct {
        unsigned sequence;
        spinlock_t lock;
} seqlock_t;

/*
 * These macros triggered gcc-3.x compile-time problems.  We think these are
 * OK now.  Be cautious.
 */
#define SEQLOCK_UNLOCKED { 0, SPIN_LOCK_UNLOCKED }
#define seqlock_init(x) do { *(x) = (seqlock_t) SEQLOCK_UNLOCKED; } while (0)

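/*
 * Illustrative sketch (hypothetical names, not taken from any caller of
 * this header): a seqlock protecting a pair of values that readers must
 * observe as a consistent set.
 *
 *      static seqlock_t foo_lock = SEQLOCK_UNLOCKED;
 *      static struct { unsigned long a, b; } foo_data;
 *
 * A lock embedded in a dynamically initialised structure would instead be
 * set up with seqlock_init(&foo_lock).
 */
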
/* Lock out other writers and update the count.
 * Acts like a normal spin_lock/unlock.
 * Don't need preempt_disable() because that is in the spin_lock already.
 */
static inline void write_seqlock(seqlock_t *sl)
{
        spin_lock(&sl->lock);
        ++sl->sequence;
        smp_wmb();
}

static inline void write_sequnlock(seqlock_t *sl)
{
        smp_wmb();
        sl->sequence++;
        spin_unlock(&sl->lock);
}

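/*
 * Illustrative writer sketch for the hypothetical foo_lock/foo_data above:
 * the update is bracketed by write_seqlock()/write_sequnlock(), so readers
 * see either both old values or both new values, never a mix.
 *
 *      write_seqlock(&foo_lock);
 *      foo_data.a = new_a;
 *      foo_data.b = new_b;
 *      write_sequnlock(&foo_lock);
 */
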
static inline int write_tryseqlock(seqlock_t *sl)
{
        int ret = spin_trylock(&sl->lock);

        if (ret) {
                ++sl->sequence;
                smp_wmb();
        }
        return ret;
}
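
/*
 * Illustrative sketch (hypothetical usage): a writer that must not spin
 * can use write_tryseqlock() and fall back to deferring the update when
 * another writer already holds the lock.
 *
 *      if (write_tryseqlock(&foo_lock)) {
 *              foo_data.a = new_a;
 *              foo_data.b = new_b;
 *              write_sequnlock(&foo_lock);
 *      } else {
 *              ... defer or skip the update ...
 *      }
 */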

/* Start of read calculation -- fetch last complete writer token */
static inline unsigned read_seqbegin(const seqlock_t *sl)
{
        unsigned ret = sl->sequence;
        smp_rmb();
        return ret;
}

/* Test if reader processed invalid data.
 * If the initial value is odd,
 *      then a writer had already started when the section was entered.
 * If the sequence value changed,
 *      then a writer changed the data while in the section.
 *
 * Using xor saves one conditional branch.
 */
static inline int read_seqretry(const seqlock_t *sl, unsigned iv)
{
        smp_rmb();
        return (iv & 1) | (sl->sequence ^ iv);
}

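/*
 * Illustrative reader sketch for the hypothetical foo_lock/foo_data above:
 * the loop retries until a pass completes with no writer active and no
 * sequence change, so a and b are guaranteed to come from the same update.
 *
 *      unsigned seq;
 *      unsigned long a, b;
 *
 *      do {
 *              seq = read_seqbegin(&foo_lock);
 *              a = foo_data.a;
 *              b = foo_data.b;
 *      } while (read_seqretry(&foo_lock, seq));
 */
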
/*
 * Version using a sequence counter only.
 * This can be used when the code has its own mutex protecting the
 * update, taken before the write_seqcount_begin() and released
 * after the write_seqcount_end().
 */

typedef struct seqcount {
        unsigned sequence;
} seqcount_t;

#define SEQCNT_ZERO { 0 }
#define seqcount_init(x)        do { *(x) = (seqcount_t) SEQCNT_ZERO; } while (0)

/* Start of read using pointer to a sequence counter only.  */
static inline unsigned read_seqcount_begin(const seqcount_t *s)
{
        unsigned ret = s->sequence;
        smp_rmb();
        return ret;
}

/* Test if reader processed invalid data.
 * Equivalent to: iv is odd or sequence number has changed.
 *                (iv & 1) || (s->sequence != iv)
 * Using xor saves one conditional branch.
 */
static inline int read_seqcount_retry(const seqcount_t *s, unsigned iv)
{
        smp_rmb();
        return (iv & 1) | (s->sequence ^ iv);
}

/*
 * The sequence-counter-only version assumes that callers provide their
 * own mutual exclusion between writers.
 */
static inline void write_seqcount_begin(seqcount_t *s)
{
        s->sequence++;
        smp_wmb();
}

static inline void write_seqcount_end(seqcount_t *s)
{
        smp_wmb();
        s->sequence++;
}

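/*
 * Illustrative seqcount sketch (hypothetical names): writers serialise
 * against each other with their own lock; the seqcount only lets lockless
 * readers detect a concurrent update and retry.
 *
 *      static seqcount_t bar_seq = SEQCNT_ZERO;
 *
 * Writer (under the caller's own mutex or spinlock):
 *      write_seqcount_begin(&bar_seq);
 *      ... update the data ...
 *      write_seqcount_end(&bar_seq);
 *
 * Reader:
 *      do {
 *              seq = read_seqcount_begin(&bar_seq);
 *              ... copy the data ...
 *      } while (read_seqcount_retry(&bar_seq, seq));
 */
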
/*
 * Possible sw/hw IRQ protected versions of the interfaces.
 */
#define write_seqlock_irqsave(lock, flags)                              \
        do { local_irq_save(flags); write_seqlock(lock); } while (0)
#define write_seqlock_irq(lock)                                         \
        do { local_irq_disable();   write_seqlock(lock); } while (0)
#define write_seqlock_bh(lock)                                          \
        do { local_bh_disable();    write_seqlock(lock); } while (0)

#define write_sequnlock_irqrestore(lock, flags)                         \
        do { write_sequnlock(lock); local_irq_restore(flags); } while (0)
#define write_sequnlock_irq(lock)                                       \
        do { write_sequnlock(lock); local_irq_enable(); } while (0)
#define write_sequnlock_bh(lock)                                        \
        do { write_sequnlock(lock); local_bh_enable(); } while (0)

#define read_seqbegin_irqsave(lock, flags)                              \
        ({ local_irq_save(flags);   read_seqbegin(lock); })

#define read_seqretry_irqrestore(lock, iv, flags)                       \
        ({                                                              \
                int ret = read_seqretry(lock, iv);                      \
                local_irq_restore(flags);                               \
                ret;                                                    \
        })
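
/*
 * Illustrative sketch of the IRQ-safe write side (hypothetical data): when
 * the same seqlock can also be taken from interrupt context, interrupts
 * must be disabled around the update to avoid deadlocking against the
 * interrupt handler.
 *
 *      unsigned long flags;
 *
 *      write_seqlock_irqsave(&foo_lock, flags);
 *      foo_data.a = new_a;
 *      foo_data.b = new_b;
 *      write_sequnlock_irqrestore(&foo_lock, flags);
 */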

#endif /* __LINUX_SEQLOCK_H */