// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file is an internal atomic implementation, use base/atomicops.h instead.
#ifndef BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
#define BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
#if defined(__LB_XB1__) && !defined(WIN32)
# include <intrin.h>
// These are normally defined by windows.h:
typedef long LONG;
typedef long long LONGLONG;
typedef void* PVOID;
void __faststorefence(void);
#pragma intrinsic(__faststorefence)
inline void MemoryBarrier() {
__faststorefence();
}
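// Map the Win32 Interlocked* names used below onto the corresponding compiler
// intrinsics, since windows.h is not included in this configuration.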
#define InterlockedCompareExchange _InterlockedCompareExchange
#define InterlockedCompareExchangePointer _InterlockedCompareExchangePointer
#define InterlockedExchangePointer _InterlockedExchangePointer
#define InterlockedExchange _InterlockedExchange
#define InterlockedExchangeAdd _InterlockedExchangeAdd
#define InterlockedExchangeAdd64 _InterlockedExchangeAdd64
#else
# include <windows.h>
#if defined(ARCH_CPU_64_BITS) || defined(__LB_XB360__)
// windows.h #defines MemoryBarrier (only on x64). This causes problems because
// the public API also uses MemoryBarrier as the public name for this fence, so
// on x64 we #undef it and call its documented implementation
// (http://msdn.microsoft.com/en-us/library/windows/desktop/ms684208.aspx)
// directly.
#undef MemoryBarrier
#endif
#endif
namespace base {
namespace subtle {
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
LONG result = InterlockedCompareExchange(
reinterpret_cast<volatile LONG*>(ptr),
static_cast<LONG>(new_value),
static_cast<LONG>(old_value));
return static_cast<Atomic32>(result);
}
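// Illustrative usage sketch (comment only, not part of this header): the
// return value is the previous contents of *ptr, so a CAS loop retries until
// the value it read is the value the swap actually replaced. |counter| is a
// hypothetical Atomic32 owned by the caller.
//
//   Atomic32 old;
//   do {
//     old = NoBarrier_Load(&counter);
//   } while (NoBarrier_CompareAndSwap(&counter, old, old + 1) != old);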
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
LONG result = InterlockedExchange(
reinterpret_cast<volatile LONG*>(ptr),
static_cast<LONG>(new_value));
return static_cast<Atomic32>(result);
}
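// InterlockedExchangeAdd returns the value *ptr held before the addition, so
// adding |increment| once more yields the updated value. The locked add is a
// full memory barrier on x86.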
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
return InterlockedExchangeAdd(
reinterpret_cast<volatile LONG*>(ptr),
static_cast<LONG>(increment)) + increment;
}
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
return Barrier_AtomicIncrement(ptr, increment);
}
#if !(defined(_MSC_VER) && _MSC_VER >= 1400)
#error "We require at least vs2005 for MemoryBarrier"
#endif
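// A full fence: neither loads nor stores may be reordered across this call in
// either direction.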
inline void MemoryBarrier() {
#if defined(ARCH_CPU_64_BITS) || defined(__LB_XB360__)
// See #undef and note at the top of this file.
__faststorefence();
#else
// We use MemoryBarrier from WinNT.h
::MemoryBarrier();
#endif
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
NoBarrier_AtomicExchange(ptr, value);
// acts as a barrier in this implementation
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value; // works w/o barrier for current Intel chips as of June 2005
// See comments in Atomic64 version of Release_Store() below.
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return *ptr;
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
Atomic32 value = *ptr;
return value;
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
MemoryBarrier();
return *ptr;
}
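// Illustrative usage sketch (comment only, not part of this header):
// Release_Store and Acquire_Load are meant to be used as a publication pair.
// |data| and |ready| are hypothetical caller-owned variables, with |ready| an
// Atomic32 initialized to 0.
//
//   // Producer:
//   data = ComputeData();        // ordinary stores
//   Release_Store(&ready, 1);    // publish after the stores above
//
//   // Consumer:
//   if (Acquire_Load(&ready)) {  // observed the publication
//     UseData(data);             // sees the producer's writes to |data|
//   }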
#if defined(_WIN64)
// 64-bit low-level operations on 64-bit platform.
COMPILE_ASSERT(sizeof(Atomic64) == sizeof(PVOID), atomic_word_is_atomic);
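// On _WIN64, PVOID is 64 bits wide, so the pointer-flavored Interlocked
// functions below give us 64-bit compare-and-swap and exchange.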
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
PVOID result = InterlockedCompareExchangePointer(
reinterpret_cast<volatile PVOID*>(ptr),
reinterpret_cast<PVOID>(new_value), reinterpret_cast<PVOID>(old_value));
return reinterpret_cast<Atomic64>(result);
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
PVOID result = InterlockedExchangePointer(
reinterpret_cast<volatile PVOID*>(ptr),
reinterpret_cast<PVOID>(new_value));
return reinterpret_cast<Atomic64>(result);
}
inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
return InterlockedExchangeAdd64(
reinterpret_cast<volatile LONGLONG*>(ptr),
static_cast<LONGLONG>(increment)) + increment;
}
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
return Barrier_AtomicIncrement(ptr, increment);
}
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
NoBarrier_AtomicExchange(ptr, value);
// acts as a barrier in this implementation
}
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value; // works w/o barrier for current Intel chips as of June 2005
// When new chips come out, check:
// IA-32 Intel Architecture Software Developer's Manual, Volume 3:
// System Programming Guide, Chapter 7: Multiple-Processor Management,
// Section 7.2, Memory Ordering.
// Last seen at:
// http://developer.intel.com/design/pentium4/manuals/index_new.htm
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
return *ptr;
}
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
Atomic64 value = *ptr;
return value;
}
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
MemoryBarrier();
return *ptr;
}
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
#endif // defined(_WIN64)
} // namespace subtle
} // namespace base
#if defined(__LB_XB1__)
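// Clean up the Interlocked* aliases defined above so this internal header does
// not leak them to including files.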
#undef InterlockedCompareExchange
#undef InterlockedCompareExchangePointer
#undef InterlockedExchangePointer
#undef InterlockedExchange
#undef InterlockedExchangeAdd
#undef InterlockedExchangeAdd64
#endif
#endif // BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_