include/linux/proportions.h
/*
 * Floating proportions
 *
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * This file contains the public data structure and API definitions.
 */
#ifndef _LINUX_PROPORTIONS_H
#define _LINUX_PROPORTIONS_H

#include <linux/percpu_counter.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
struct prop_global {
	/*
	 * The period over which we differentiate
	 *
	 *   period = 2^shift
	 */
	int shift;
	/*
	 * The total event counter aka 'time'.
	 *
	 * Treated as an unsigned long; the lower 'shift - 1' bits are the
	 * counter bits, the remaining upper bits the period counter.
	 */
	struct percpu_counter events;
};
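
/*
 * For example, with shift = 8 on a 64-bit machine, bits 0..6 of the
 * event counter hold the count within the current period and bits
 * 7..63 count elapsed periods.
 */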
/*
 * global proportion descriptor
 *
 * this is needed to consistently flip prop_global structures.
 */
struct prop_descriptor {
	int index;
	struct prop_global pg[2];
	struct mutex mutex;		/* serialize the prop_global switch */
};
int prop_descriptor_init(struct prop_descriptor *pd, int shift);
void prop_change_shift(struct prop_descriptor *pd, int new_shift);
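
/*
 * Minimal usage sketch, not part of the original header; the names
 * below are hypothetical.  A descriptor is initialized once with the
 * desired period shift (here 2^10 events per period);
 * prop_descriptor_init() sets up both prop_global halves and can
 * fail, so the return value must be checked.
 */
static struct prop_descriptor example_prop;

static int example_prop_setup(void)
{
	return prop_descriptor_init(&example_prop, 10);
}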
/*
 * ----- PERCPU ------
 */
struct prop_local_percpu {
	/*
	 * the local events counter
	 */
	struct percpu_counter events;

	/*
	 * snapshot of the last seen global state
	 */
	int shift;
	unsigned long period;
	raw_spinlock_t lock;		/* protect the snapshot state */
};
int prop_local_init_percpu(struct prop_local_percpu *pl);
void prop_local_destroy_percpu(struct prop_local_percpu *pl);
void __prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl);
void prop_fraction_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl,
			  long *numerator, long *denominator);
static inline
void prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl)
{
	unsigned long flags;

	local_irq_save(flags);
	__prop_inc_percpu(pd, pl);
	local_irq_restore(flags);
}
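
/*
 * Usage sketch, not part of the original header; names are
 * hypothetical.  Each event source owns a prop_local_percpu: count an
 * event with prop_inc_percpu() and read the source's current share
 * back with prop_fraction_percpu(), which fills in a
 * numerator/denominator pair.
 */
static inline
unsigned long example_prop_percent(struct prop_descriptor *pd,
				   struct prop_local_percpu *pl)
{
	long num, den;

	prop_fraction_percpu(pd, pl, &num, &den);
	return den ? 100UL * num / den : 0;	/* scale to a percentage */
}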
/*
 * Limit the time part in order to ensure there are some bits left for the
 * cycle counter and fraction multiply.
 */
#if BITS_PER_LONG == 32
#define PROP_MAX_SHIFT	(3*BITS_PER_LONG/4)
#else
#define PROP_MAX_SHIFT	(BITS_PER_LONG/2)
#endif

#define PROP_FRAC_SHIFT		(BITS_PER_LONG - PROP_MAX_SHIFT - 1)
#define PROP_FRAC_BASE		(1UL << PROP_FRAC_SHIFT)
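
/*
 * Worked out: on a 64-bit machine PROP_MAX_SHIFT = 32, giving
 * PROP_FRAC_SHIFT = 31 and PROP_FRAC_BASE = 2^31; on a 32-bit machine
 * PROP_MAX_SHIFT = 24, giving PROP_FRAC_SHIFT = 7 and
 * PROP_FRAC_BASE = 128.  The 'frac' argument of the _max variant
 * below is expressed in these PROP_FRAC_BASE units.
 */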
void __prop_inc_percpu_max(struct prop_descriptor *pd,
			   struct prop_local_percpu *pl, long frac);
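
/*
 * Sketch, not part of the original header; names are hypothetical.
 * The _max variant only credits the event while this source's share
 * stays below frac/PROP_FRAC_BASE; here the cap is one half.
 */
static inline
void example_prop_event_capped(struct prop_descriptor *pd,
			       struct prop_local_percpu *pl)
{
	unsigned long flags;

	local_irq_save(flags);
	__prop_inc_percpu_max(pd, pl, PROP_FRAC_BASE / 2);
	local_irq_restore(flags);
}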
/*
 * ----- SINGLE ------
 */
struct prop_local_single {
	/*
	 * the local events counter
	 */
	unsigned long events;

	/*
	 * snapshot of the last seen global state
	 * and a lock protecting this state
	 */
	unsigned long period;
	int shift;
	raw_spinlock_t lock;		/* protect the snapshot state */
};
#define INIT_PROP_LOCAL_SINGLE(name)	\
{	.lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock),	\
}
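
/*
 * Sketch with a hypothetical name: INIT_PROP_LOCAL_SINGLE() is meant
 * for static initialization, since only the lock needs a non-zero
 * initial value; the remaining members start out zeroed.
 */
static struct prop_local_single example_single =
	INIT_PROP_LOCAL_SINGLE(example_single);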
int prop_local_init_single(struct prop_local_single *pl);
void prop_local_destroy_single(struct prop_local_single *pl);
void __prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl);
void prop_fraction_single(struct prop_descriptor *pd, struct prop_local_single *pl,
			  long *numerator, long *denominator);
static inline
void prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl)
{
	unsigned long flags;

	local_irq_save(flags);
	__prop_inc_single(pd, pl);
	local_irq_restore(flags);
}
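
/*
 * Sketch, not part of the original header; names are hypothetical.
 * The single variant mirrors the percpu one: initialize with
 * prop_local_init_single(), count with prop_inc_single(), and read
 * the share back with prop_fraction_single().
 */
static inline
unsigned long example_single_permille(struct prop_descriptor *pd,
				      struct prop_local_single *pl)
{
	long num, den;

	prop_fraction_single(pd, pl, &num, &den);
	return den ? 1000UL * num / den : 0;	/* parts per thousand */
}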
#endif /* _LINUX_PROPORTIONS_H */