Roll leveldb from r73 to r75.
[chromium-blink-merge.git] / skia / ext / SkThread_chrome.cc
blobf379bebee71ca6ece80d221a45d6e55787645b1d
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "third_party/skia/include/core/SkThread.h"
7 #include <new>
9 #include "base/atomicops.h"
10 #include "base/basictypes.h"
11 #include "base/logging.h"
12 #include "base/synchronization/lock.h"
14 /** Adds one to the int specified by the address (in a thread-safe manner), and
15 returns the previous value.
16 No additional memory barrier is required.
17 This must act as a compiler barrier.
19 int32_t sk_atomic_inc(int32_t* addr) {
20 // sk_atomic_inc is expected to return the old value,
21 // Barrier_AtomicIncrement returns the new value.
22 return base::subtle::NoBarrier_AtomicIncrement(addr, 1) - 1;
25 /* Subtracts one from the int specified by the address (in a thread-safe
26 manner), and returns the previous value.
27 Expected to act as a release (SL/S) memory barrier and a compiler barrier.
29 int32_t sk_atomic_dec(int32_t* addr) {
30 // sk_atomic_dec is expected to return the old value,
31 // Barrier_AtomicIncrement returns the new value.
32 return base::subtle::Barrier_AtomicIncrement(addr, -1) + 1;
/** If sk_atomic_dec does not act as an acquire (L/SL) barrier, this is
    expected to act as an acquire (L/SL) memory barrier and as a compiler
    barrier.
    Here sk_atomic_dec uses Barrier_AtomicIncrement (a full barrier), so no
    additional work is needed and the body is intentionally empty.
    (The function name keeps Skia's historical "aquire" spelling — it is the
    public API symbol and cannot be renamed here.)
*/
void sk_membar_aquire__after_atomic_dec() { }
39 /** Adds one to the int specified by the address iff the int specified by the
40 address is not zero (in a thread-safe manner), and returns the previous
41 value.
42 No additional memory barrier is required.
43 This must act as a compiler barrier.
45 int32_t sk_atomic_conditional_inc(int32_t* addr) {
46 int32_t value = *addr;
48 while (true) {
49 if (value == 0) {
50 return 0;
53 int32_t before;
54 before = base::subtle::Acquire_CompareAndSwap(addr, value, value + 1);
56 if (before == value) {
57 return value;
58 } else {
59 value = before;
/** If sk_atomic_conditional_inc does not act as an acquire (L/SL) barrier,
    this is expected to act as an acquire (L/SL) memory barrier and as a
    compiler barrier.
    Here sk_atomic_conditional_inc uses Acquire_CompareAndSwap, so no extra
    barrier is needed and the body is intentionally empty.
    (The function name keeps Skia's historical "aquire" spelling — it is the
    public API symbol and cannot be renamed here.)
*/
void sk_membar_aquire__after_atomic_conditional_inc() { }
69 SkMutex::SkMutex() {
70 COMPILE_ASSERT(sizeof(base::Lock) <= sizeof(fStorage), Lock_is_too_big_for_SkMutex);
71 base::Lock* lock = reinterpret_cast<base::Lock*>(fStorage);
72 new(lock) base::Lock();
75 SkMutex::~SkMutex() {
76 base::Lock* lock = reinterpret_cast<base::Lock*>(fStorage);
77 lock->~Lock();
80 void SkMutex::acquire() {
81 base::Lock* lock = reinterpret_cast<base::Lock*>(fStorage);
82 lock->Acquire();
85 void SkMutex::release() {
86 base::Lock* lock = reinterpret_cast<base::Lock*>(fStorage);
87 lock->Release();