#ifndef __ASM_SOFTIRQ_H
#define __ASM_SOFTIRQ_H

#include <asm/atomic.h>
#include <asm/hardirq.h>

extern unsigned int local_bh_count[NR_CPUS];

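/*
 * Each CPU keeps a nesting count of bottom half disables in
 * local_bh_count[]. barrier() is only a compiler barrier; it keeps
 * the compiler from moving accesses in the critical section across
 * the count update.
 */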
#define cpu_bh_disable(cpu)	do { local_bh_count[(cpu)]++; barrier(); } while (0)
#define cpu_bh_enable(cpu)	do { barrier(); local_bh_count[(cpu)]--; } while (0)

#define cpu_bh_trylock(cpu)	(local_bh_count[(cpu)] ? 0 : (local_bh_count[(cpu)] = 1))
#define cpu_bh_endlock(cpu)	(local_bh_count[(cpu)] = 0)

#define local_bh_disable()	cpu_bh_disable(smp_processor_id())
#define local_bh_enable()	cpu_bh_enable(smp_processor_id())

#define get_active_bhs()	(bh_mask & bh_active)
#define clear_active_bhs(x)	atomic_clear_mask((x),&bh_active)

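/*
 * bh_base[] holds the handler routines, bh_mask and bh_active are
 * bitmasks of enabled and pending bottom halves, and bh_mask_count[]
 * counts nested disables per bottom half (all defined in
 * kernel/softirq.c).
 */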
extern inline void init_bh(int nr, void (*routine)(void))
{
	bh_base[nr] = routine;
	atomic_set(&bh_mask_count[nr], 0);
	bh_mask |= 1 << nr;
}

extern inline void remove_bh(int nr)
{
	bh_mask &= ~(1 << nr);
	mb();			/* mask off the bh before the handler goes away */
	bh_base[nr] = NULL;
}

extern inline void mark_bh(int nr)
{
	set_bit(nr, &bh_active);
}

#ifdef __SMP__
#error SMP not supported
#else

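/*
 * Uniprocessor versions: keeping local bottom halves disabled is
 * enough to make a section atomic with respect to bh handlers.
 */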
extern inline void start_bh_atomic(void)
{
	local_bh_count[smp_processor_id()]++;	/* same effect as local_bh_disable() */
	barrier();
}

extern inline void end_bh_atomic(void)
{
	barrier();
	local_bh_count[smp_processor_id()]--;	/* same effect as local_bh_enable() */
}

/* These are for the irqs testing the lock */
#define softirq_trylock(cpu)	(cpu_bh_trylock(cpu))
#define softirq_endlock(cpu)	(cpu_bh_endlock(cpu))

/* On UP nothing else can be running a bottom half, so a compiler barrier will do. */
#define synchronize_bh()	barrier()

#endif	/* __SMP__ */

/*
 * These use a mask count to correctly handle
 * nested disable/enable calls
 */
extern inline void disable_bh(int nr)
{
	bh_mask &= ~(1 << nr);
	atomic_inc(&bh_mask_count[nr]);
	synchronize_bh();
}

extern inline void enable_bh(int nr)
{
	if (atomic_dec_and_test(&bh_mask_count[nr]))
		bh_mask |= 1 << nr;
}

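/*
 * A sketch of the nesting behaviour (TIMER_BH is just an example bh
 * number from <linux/interrupt.h>):
 *
 *	disable_bh(TIMER_BH);	mask count 1, bh masked off
 *	disable_bh(TIMER_BH);	mask count 2, still masked
 *	enable_bh(TIMER_BH);	mask count 1, still masked
 *	enable_bh(TIMER_BH);	count reaches 0, bh unmasked again
 */
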
#endif	/* __ASM_SOFTIRQ_H */