/*
 * Floating proportions
 *
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * This file contains the public data structure and API definitions.
 */

#ifndef _LINUX_PROPORTIONS_H
#define _LINUX_PROPORTIONS_H

#include <linux/percpu_counter.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
struct prop_global {
	/*
	 * The period over which we differentiate
	 *
	 *   period = 2^shift
	 */
	int shift;
	/*
	 * The total event counter aka 'time'.
	 *
	 * Treated as an unsigned long; the lower 'shift - 1' bits are the
	 * counter bits, the remaining upper bits the period counter.
	 */
	struct percpu_counter events;
};
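/*
 * Worked example of the layout described above (purely illustrative,
 * the shift value is made up): with shift = 10 and BITS_PER_LONG = 64,
 * the period is 2^10 = 1024, the lower 9 (shift - 1) bits of 'events'
 * are the in-period counter bits, and the upper 55 bits form the
 * period counter.
 */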
/*
 * global proportion descriptor
 *
 * this is needed to consistently flip prop_global structures.
 */
struct prop_descriptor {
	int index;
	struct prop_global pg[2];
	struct mutex mutex;		/* serialize the prop_global switch */
};

int prop_descriptor_init(struct prop_descriptor *pd, int shift);
void prop_change_shift(struct prop_descriptor *pd, int new_shift);
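/*
 * Minimal usage sketch for the descriptor (illustrative only; 'my_prop'
 * and the shift values are hypothetical, not part of this API):
 *
 *	static struct prop_descriptor my_prop;
 *
 *	int err = prop_descriptor_init(&my_prop, 10);	// period = 2^10
 *	if (err)
 *		return err;
 *
 *	prop_change_shift(&my_prop, 12);	// later: widen the period to 2^12
 */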
/*
 * ----- PERCPU ------
 */

struct prop_local_percpu {
	/*
	 * the local events counter
	 */
	struct percpu_counter events;

	/*
	 * snapshot of the last seen global state
	 */
	int shift;
	unsigned long period;
	spinlock_t lock;		/* protect the snapshot state */
};

int prop_local_init_percpu(struct prop_local_percpu *pl);
void prop_local_destroy_percpu(struct prop_local_percpu *pl);
void __prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl);
void prop_fraction_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl,
		long *numerator, long *denominator);

static inline
void prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl)
{
	unsigned long flags;

	local_irq_save(flags);
	__prop_inc_percpu(pd, pl);
	local_irq_restore(flags);
}
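/*
 * Sketch of the per-CPU flavour (illustrative; 'my_prop' and 'my_local'
 * are hypothetical names):
 *
 *	static struct prop_local_percpu my_local;
 *
 *	prop_local_init_percpu(&my_local);	// once, at setup
 *
 *	prop_inc_percpu(&my_prop, &my_local);	// per event, IRQ-safe wrapper
 *
 *	long num, den;
 *	prop_fraction_percpu(&my_prop, &my_local, &num, &den);
 *	// this local's share of recent global events is roughly num/den
 *
 *	prop_local_destroy_percpu(&my_local);	// at teardown
 */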
/*
 * Limit the time part in order to ensure there are some bits left for the
 * cycle counter and fraction multiply.
 */
#define PROP_MAX_SHIFT	(3*BITS_PER_LONG/4)

#define PROP_FRAC_SHIFT		(BITS_PER_LONG - PROP_MAX_SHIFT - 1)
#define PROP_FRAC_BASE		(1UL << PROP_FRAC_SHIFT)
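/*
 * Worked out for the two common word sizes (simple arithmetic on the
 * definitions above):
 *
 *	BITS_PER_LONG == 64: PROP_MAX_SHIFT == 48, PROP_FRAC_SHIFT == 15,
 *			     PROP_FRAC_BASE == 32768
 *	BITS_PER_LONG == 32: PROP_MAX_SHIFT == 24, PROP_FRAC_SHIFT == 7,
 *			     PROP_FRAC_BASE == 128
 */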
void __prop_inc_percpu_max(struct prop_descriptor *pd,
			   struct prop_local_percpu *pl, long frac);
/*
 * ----- SINGLE ------
 */

struct prop_local_single {
	/*
	 * the local events counter
	 */
	unsigned long events;

	/*
	 * snapshot of the last seen global state
	 * and a lock protecting this state
	 */
	unsigned long period;
	int shift;
	spinlock_t lock;		/* protect the snapshot state */
};

#define INIT_PROP_LOCAL_SINGLE(name)	\
{	.lock = __SPIN_LOCK_UNLOCKED(name.lock),	\
}
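/*
 * Static initialisation example (illustrative; 'my_single' is a made-up
 * name):
 *
 *	static struct prop_local_single my_single =
 *		INIT_PROP_LOCAL_SINGLE(my_single);
 *
 * Only the lock needs an explicit initialiser here; the remaining fields
 * of a static object start out zero.
 */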
int prop_local_init_single(struct prop_local_single *pl);
void prop_local_destroy_single(struct prop_local_single *pl);
void __prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl);
void prop_fraction_single(struct prop_descriptor *pd, struct prop_local_single *pl,
		long *numerator, long *denominator);

static inline
void prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl)
{
	unsigned long flags;

	local_irq_save(flags);
	__prop_inc_single(pd, pl);
	local_irq_restore(flags);
}
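/*
 * Dynamic-init counterpart for the single (non-percpu) flavour, mirroring
 * the percpu sketch above (illustrative; names are hypothetical):
 *
 *	struct prop_local_single my_single;
 *
 *	prop_local_init_single(&my_single);
 *	prop_inc_single(&my_prop, &my_single);	// per event
 *
 *	long num, den;
 *	prop_fraction_single(&my_prop, &my_single, &num, &den);
 *
 *	prop_local_destroy_single(&my_single);
 */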
#endif /* _LINUX_PROPORTIONS_H */