Import Cobalt 6.14838

Change-Id: I49864fe26f7f6fca3777d185823aa31251e8ae57
diff --git a/src/nb/analytics/memory_tracker.cc b/src/nb/analytics/memory_tracker.cc
new file mode 100644
index 0000000..cf6c868
--- /dev/null
+++ b/src/nb/analytics/memory_tracker.cc
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2016 Google Inc. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "nb/analytics/memory_tracker.h"
+
+#include "nb/analytics/memory_tracker_impl.h"
+#include "starboard/once.h"
+
namespace nb {
namespace analytics {
namespace {
// Lazily creates the process-wide MemoryTrackerImpl exactly once, in a
// thread-safe way, using Starboard's once-initialization support.
SB_ONCE_INITIALIZE_FUNCTION(MemoryTrackerImpl, GetMemoryTrackerImplSingleton);
}  // namespace

// Returns the singleton MemoryTracker, creating it on first use. The
// instance is intentionally never destroyed (the header forbids deleting a
// tracker that may still have hooks installed).
MemoryTracker* MemoryTracker::Get() {
  return GetMemoryTrackerImplSingleton();
}

}  // namespace analytics
}  // namespace nb
diff --git a/src/nb/analytics/memory_tracker.h b/src/nb/analytics/memory_tracker.h
new file mode 100644
index 0000000..27ef75f
--- /dev/null
+++ b/src/nb/analytics/memory_tracker.h
@@ -0,0 +1,137 @@
+/*
+ * Copyright 2016 Google Inc. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef NB_MEMORY_TRACKER_H_
+#define NB_MEMORY_TRACKER_H_
+
+#include <vector>
+#include "starboard/configuration.h"
+#include "starboard/types.h"
+
+namespace nb {
+namespace analytics {
+
+class MemoryTracker;
+class AllocationVisitor;
+class AllocationGroup;
+class AllocationRecord;
+
+// Creates a MemoryTracker instance that implements the
+//  MemoryTracker. Once the instance is created it can begin tracking
+//  system allocations by calling InstallGlobalTrackingHooks().
+//  Deleting the MemoryTracker is forbidden.
+//
// Example, Creation and Hooking:
//   static MemoryTracker* s_global_tracker = MemoryTracker::Get();
//   s_global_tracker->InstallGlobalTrackingHooks();  // now tracking memory.
+//
+// Data about the allocations are aggregated under AllocationGroups and it's
+//  recommended that GetAllocationGroups(...) is used to get simple allocation
+//  statistics.
+//
+// Deeper analytics are possible by creating an AllocationVisitor subclass and
+//  traversing through the internal allocations of the tracker. In this way all
+//  known information about allocation state of the program is made accessible.
+//  The visitor does not need to perform any locking as this is guaranteed by
+//  the MemoryTracker.
+//
// Example (AllocationVisitor):
//  MyAllocationVisitor visitor = ...;
//  s_global_tracker->Accept(&visitor);
//  visitor.PrintAllocations();
+//
+// Performance:
+//  1) Gold builds disallow memory tracking and therefore have zero-cost
+//     for this feature.
+//  2) All other builds that allow memory tracking have minimal cost as long
+//     as memory tracking has not been activated. This is facilitated by NOT
+//     using locks, at the expense of thread safety during teardown (hence the
+//     reason why you should NOT delete a memory tracker with hooks installed).
+//  3) When the memory tracking has been activated then there is a non-trivial
+//     performance cost in terms of CPU and memory for the feature.
// Abstract interface for the process-wide memory tracker. See the
// file-level comment above for usage, performance notes, and lifetime
// restrictions.
class MemoryTracker {
 public:
  // Gets the singleton instance of the default MemoryTracker. This
  // is created the first time it is used.
  static MemoryTracker* Get();

  MemoryTracker() {}

  // Installs the global hooks that route allocations through this tracker.
  // Returns true on success.
  virtual bool InstallGlobalTrackingHooks() = 0;

  // Uninstalls the global hooks. It's recommended the MemoryTracker is never
  // removed or deleted during the runtime.
  virtual void RemoveGlobalTrackingHooks() = 0;

  // Returns the total amount of bytes that are tracked.
  virtual int64_t GetTotalAllocationBytes() = 0;
  // Returns the total number of live tracked allocations.
  virtual int64_t GetTotalNumberOfAllocations() = 0;

  // Allows probing of all memory allocations. The visitor does not need to
  // perform any locking and can allocate memory during its operation.
  virtual void Accept(AllocationVisitor* visitor) = 0;

  // Collects all memory groups that exist. The AllocationGroups lifetime
  // exists for as long as the MemoryTracker instance is alive.
  virtual void GetAllocationGroups(
      std::vector<const AllocationGroup*>* output) = 0;

  // Enables/disables memory tracking in the current thread.
  virtual void SetMemoryTrackingEnabled(bool on) = 0;
  // Returns the memory tracking state in the current thread.
  virtual bool IsMemoryTrackingEnabled() const = 0;

  // Returns true if the memory was successfully tracked.
  virtual bool AddMemoryTracking(const void* memory, size_t size) = 0;
  // Returns a non-zero size if the memory was successfully removed.
  virtual size_t RemoveMemoryTracking(const void* memory) = 0;
  // Returns true if the memory has tracking. When true is returned then the
  // supplied AllocationRecord is written.
  virtual bool GetMemoryTracking(const void* memory,
                                 AllocationRecord* record) const = 0;

 protected:
  // Protected: instances must not be deleted through this interface
  // (installed hooks may still reference the tracker).
  virtual ~MemoryTracker() {}

  SB_DISALLOW_COPY_AND_ASSIGN(MemoryTracker);
};
+
// A visitor class which is useful for inspecting data. Pass an instance to
// MemoryTracker::Accept(); Visit() is then invoked once per live allocation.
class AllocationVisitor {
 public:
  // Returns true to keep visiting, otherwise abort.
  virtual bool Visit(const void* memory,
                     const AllocationRecord& alloc_record) = 0;
  virtual ~AllocationVisitor() {}
};
+
+// Contains an allocation record for a pointer including it's size and what
+// AllocationGroup it was constructed under.
+struct AllocationRecord {
+  AllocationRecord() : size(0), allocation_group(NULL) {}
+  AllocationRecord(size_t sz, AllocationGroup* group)
+      : size(sz), allocation_group(group) {}
+
+  static AllocationRecord Empty() { return AllocationRecord(); }
+  bool IsEmpty() const { return !size && !allocation_group; }
+  size_t size;
+  AllocationGroup* allocation_group;
+};
+
+}  // namespace analytics
+}  // namespace nb
+
+#endif  // NB_MEMORY_TRACKER_H_
diff --git a/src/nb/analytics/memory_tracker_helpers.cc b/src/nb/analytics/memory_tracker_helpers.cc
new file mode 100644
index 0000000..2f4776a
--- /dev/null
+++ b/src/nb/analytics/memory_tracker_helpers.cc
@@ -0,0 +1,311 @@
+/*
+ * Copyright 2016 Google Inc. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "nb/analytics/memory_tracker_helpers.h"
+
+#include <stdint.h>
+#include <vector>
+
+#include "nb/hash.h"
+#include "starboard/configuration.h"
+#include "starboard/log.h"
+
+namespace nb {
+namespace analytics {
+
// Counters start at zero; the group's name is fixed for its lifetime.
AllocationGroup::AllocationGroup(const std::string& name)
    : name_(name), allocation_bytes_(0), num_allocations_(0) {}

AllocationGroup::~AllocationGroup() {}
+
+void AllocationGroup::AddAllocation(int64_t num_bytes) {
+  if (num_bytes == 0)
+    return;
+  int num_alloc_diff = num_bytes > 0 ? 1 : -1;
+
+  allocation_bytes_.fetch_add(num_bytes);
+  num_allocations_.fetch_add(num_alloc_diff);
+}
+
// Copies both counters into the output parameters. Each counter is read
// atomically, but the pair is not read as a single atomic snapshot.
void AllocationGroup::GetAggregateStats(int32_t* num_allocs,
                                        int64_t* allocation_bytes) const {
  *num_allocs = num_allocations_.load();
  *allocation_bytes = allocation_bytes_.load();
}

// Current net number of bytes attributed to this group.
int64_t AllocationGroup::allocation_bytes() const {
  return allocation_bytes_.load();
}

// Current net number of live allocations attributed to this group.
int32_t AllocationGroup::num_allocations() const {
  return num_allocations_.load();
}
+
+AtomicStringAllocationGroupMap::AtomicStringAllocationGroupMap() {
+  unaccounted_group_ = Ensure("Unaccounted");
+}
+
+AtomicStringAllocationGroupMap::~AtomicStringAllocationGroupMap() {
+  unaccounted_group_ = NULL;
+  while (!group_map_.empty()) {
+    Map::iterator it = group_map_.begin();
+    delete it->second;
+    group_map_.erase(it);
+  }
+}
+
+AllocationGroup* AtomicStringAllocationGroupMap::Ensure(
+    const std::string& name) {
+  starboard::ScopedLock lock(mutex_);
+  Map::const_iterator found_it = group_map_.find(name);
+  if (found_it != group_map_.end()) {
+    return found_it->second;
+  }
+  AllocationGroup* group = new AllocationGroup(name);
+  group_map_[name] = group;
+  return group;
+}
+
// Returns the always-present "Unaccounted" group. The pointer is written
// once in the constructor, so no locking is required here.
AllocationGroup* AtomicStringAllocationGroupMap::GetDefaultUnaccounted() {
  return unaccounted_group_;
}
+
// Removes the named entry from the map. Returns false if no entry existed.
// NOTE(review): the AllocationGroup itself is NOT deleted here, so erasing
// an entry leaks the group unless some caller takes ownership -- confirm
// this is intentional (outstanding AllocationRecords may still point at it).
bool AtomicStringAllocationGroupMap::Erase(const std::string& name) {
  starboard::ScopedLock lock(mutex_);
  Map::iterator found_it = group_map_.find(name);
  if (found_it == group_map_.end()) {
    // Didn't find it.
    return false;
  }
  group_map_.erase(found_it);
  return true;
}
+
+void AtomicStringAllocationGroupMap::GetAll(
+    std::vector<const AllocationGroup*>* output) const {
+  starboard::ScopedLock lock(mutex_);
+  for (Map::const_iterator it = group_map_.begin(); it != group_map_.end();
+       ++it) {
+    output->push_back(it->second);
+  }
+}
+
void AllocationGroupStack::Push(AllocationGroup* group) {
  alloc_group_stack_.push_back(group);
}

// Precondition: Push() has been called more times than Pop(); popping an
// empty stack is undefined behavior (std::vector::pop_back on empty).
void AllocationGroupStack::Pop() {
  alloc_group_stack_.pop_back();
}

// Returns the most recently pushed group, or NULL when the stack is empty.
AllocationGroup* AllocationGroupStack::Peek() {
  if (alloc_group_stack_.empty()) {
    return NULL;
  }
  return alloc_group_stack_.back();
}
+
AtomicAllocationMap::AtomicAllocationMap() {}

// Note: the destructor does not credit remaining entries back to their
// AllocationGroups; call Clear() first if that accounting is required.
AtomicAllocationMap::~AtomicAllocationMap() {}
+
+bool AtomicAllocationMap::Add(const void* memory,
+                              const AllocationRecord& alloc_record) {
+  starboard::ScopedLock lock(mutex_);
+  const bool inserted =
+      pointer_map_.insert(std::make_pair(memory, alloc_record)).second;
+  return inserted;
+}
+
+bool AtomicAllocationMap::Get(const void* memory,
+                              AllocationRecord* alloc_record) const {
+  starboard::ScopedLock lock(mutex_);
+  PointerMap::const_iterator found_it = pointer_map_.find(memory);
+  if (found_it == pointer_map_.end()) {
+    if (alloc_record) {
+      *alloc_record = AllocationRecord::Empty();
+    }
+    return false;
+  }
+  if (alloc_record) {
+    *alloc_record = found_it->second;
+  }
+  return true;
+}
+
// Removes |memory|'s entry under the lock. Returns true if an entry existed;
// the removed record (or an empty record on a miss) is copied to
// |alloc_record| when provided.
bool AtomicAllocationMap::Remove(const void* memory,
                                 AllocationRecord* alloc_record) {
  starboard::ScopedLock lock(mutex_);
  PointerMap::iterator found_it = pointer_map_.find(memory);

  if (found_it == pointer_map_.end()) {
    if (alloc_record) {
      *alloc_record = AllocationRecord::Empty();
    }
    return false;
  }
  if (alloc_record) {
    *alloc_record = found_it->second;
  }
  pointer_map_.erase(found_it);
  return true;
}
+
// Presents every live entry to |visitor|. The internal lock is held for the
// entire traversal. Returns false as soon as the visitor aborts (returns
// false).
bool AtomicAllocationMap::Accept(AllocationVisitor* visitor) const {
  starboard::ScopedLock lock(mutex_);
  for (PointerMap::const_iterator it = pointer_map_.begin();
       it != pointer_map_.end(); ++it) {
    const void* memory = it->first;
    const AllocationRecord& alloc_rec = it->second;
    if (!visitor->Visit(memory, alloc_rec)) {
      return false;
    }
  }
  return true;
}
+
// Number of tracked pointers currently in the map.
size_t AtomicAllocationMap::Size() const {
  starboard::ScopedLock lock(mutex_);
  return pointer_map_.size();
}

bool AtomicAllocationMap::Empty() const {
  starboard::ScopedLock lock(mutex_);
  return pointer_map_.empty();
}
+
+void AtomicAllocationMap::Clear() {
+  starboard::ScopedLock lock(mutex_);
+  for (PointerMap::iterator it = pointer_map_.begin();
+       it != pointer_map_.end(); ++it) {
+    const AllocationRecord& rec = it->second;
+    AllocationGroup* group = rec.allocation_group;
+    group->AddAllocation(-rec.size);
+  }
+  return pointer_map_.clear();
+}
+
ConcurrentAllocationMap::ConcurrentAllocationMap() : pointer_map_array_() {}

// Clear() rebalances the AllocationGroup counters for any entries still
// present at destruction time.
ConcurrentAllocationMap::~ConcurrentAllocationMap() {
  Clear();
}
+
+bool ConcurrentAllocationMap::Add(const void* memory,
+                                  const AllocationRecord& alloc_record) {
+  AtomicAllocationMap& map = GetMapForPointer(memory);
+  return map.Add(memory, alloc_record);
+}
+
+bool ConcurrentAllocationMap::Get(const void* memory,
+                                  AllocationRecord* alloc_record) const {
+  const AtomicAllocationMap& map = GetMapForPointer(memory);
+  return map.Get(memory, alloc_record);
+}
+
+bool ConcurrentAllocationMap::Remove(const void* memory,
+                                     AllocationRecord* alloc_record) {
+  AtomicAllocationMap& map = GetMapForPointer(memory);
+  bool output = map.Remove(memory, alloc_record);
+  return output;
+}
+
+size_t ConcurrentAllocationMap::Size() const {
+  size_t size = 0;
+  for (int i = 0; i < kNumElements; ++i) {
+    const AtomicAllocationMap& map = pointer_map_array_[i];
+    size += map.Size();
+  }
+  return size;
+}
+
+bool ConcurrentAllocationMap::Empty() const {
+  return 0 == Size();
+}
+
+void ConcurrentAllocationMap::Clear() {
+  for (int i = 0; i < kNumElements; ++i) {
+    AtomicAllocationMap& map = pointer_map_array_[i];
+    map.Clear();
+  }
+}
+
// Visits every bucket in turn; each bucket is locked only while it is being
// traversed. Returns false as soon as the visitor aborts.
bool ConcurrentAllocationMap::Accept(AllocationVisitor* visitor) const {
  for (int i = 0; i < kNumElements; ++i) {
    const AtomicAllocationMap& map = pointer_map_array_[i];
    if (!map.Accept(visitor)) {
      return false;  // Early out.
    }
  }
  return true;
}
+
+size_t ConcurrentAllocationMap::hash_ptr(const void* ptr) {
+  uintptr_t val = reinterpret_cast<uintptr_t>(ptr);
+
+  return RuntimeHash32(reinterpret_cast<const char*>(&val), sizeof(val));
+}
+
+int ConcurrentAllocationMap::ToIndex(const void* ptr) const {
+  SB_DCHECK(0 != kNumElements);
+  uint32_t hash_val = hash_ptr(ptr);
+  int index = hash_val % kNumElements;
+  return index;
+}
+
// Returns the bucket that owns |ptr|. The mapping is stable, so Add/Get/
// Remove for the same pointer always hit the same bucket.
AtomicAllocationMap& ConcurrentAllocationMap::GetMapForPointer(
    const void* ptr) {
  return pointer_map_array_[ToIndex(ptr)];
}

const AtomicAllocationMap& ConcurrentAllocationMap::GetMapForPointer(
    const void* ptr) const {
  return pointer_map_array_[ToIndex(ptr)];
}
+
+SimpleThread::SimpleThread(const std::string& name)
+    : thread_(kSbThreadInvalid), name_(name) {}
+
+SimpleThread::~SimpleThread() {}
+
+void SimpleThread::Start() {
+  SbThreadEntryPoint entry_point = ThreadEntryPoint;
+
+  thread_ = SbThreadCreate(0,                    // default stack_size.
+                           kSbThreadNoPriority,  // default priority.
+                           kSbThreadNoAffinity,  // default affinity.
+                           true,                 // joinable.
+                           name_.c_str(), entry_point, this);
+
+  // SbThreadCreate() above produced an invalid thread handle.
+  SB_DCHECK(thread_ != kSbThreadInvalid);
+  return;
+}
+
// Static trampoline: recovers the SimpleThread instance from |context| and
// invokes its Run() method on the newly created thread.
void* SimpleThread::ThreadEntryPoint(void* context) {
  SimpleThread* this_ptr = static_cast<SimpleThread*>(context);
  this_ptr->Run();
  return NULL;
}
+
// Blocks until the underlying thread exits. A failed join is treated as a
// programming error (e.g. Start() was never called), hence the DCHECK.
void SimpleThread::DoJoin() {
  if (!SbThreadJoin(thread_, NULL)) {
    SB_DCHECK(false) << "Could not join thread.";
  }
}
+
+}  // namespace analytics
+}  // namespace nb
diff --git a/src/nb/analytics/memory_tracker_helpers.h b/src/nb/analytics/memory_tracker_helpers.h
new file mode 100644
index 0000000..d07651c
--- /dev/null
+++ b/src/nb/analytics/memory_tracker_helpers.h
@@ -0,0 +1,264 @@
+/*
+ * Copyright 2016 Google Inc. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef NB_MEMORY_TRACKER_HELPERS_H_
+#define NB_MEMORY_TRACKER_HELPERS_H_
+
#include <map>
#include <string>
#include <vector>

#include "nb/analytics/memory_tracker.h"
#include "nb/atomic.h"
#include "starboard/log.h"
#include "starboard/mutex.h"
#include "starboard/thread.h"
#include "starboard/types.h"
+
+namespace nb {
+namespace analytics {
+
+class AllocationGroup;
+struct AllocationRecord;
+
+template <typename Type>
+class ThreadLocalPointer {
+ public:
+  ThreadLocalPointer() {
+    slot_ = SbThreadCreateLocalKey(NULL);  // No destructor for pointer.
+    SB_DCHECK(kSbThreadLocalKeyInvalid != slot_);
+  }
+
+  ~ThreadLocalPointer() { SbThreadDestroyLocalKey(slot_); }
+
+  Type* Get() const {
+    void* ptr = SbThreadGetLocalValue(slot_);
+    Type* type_ptr = static_cast<Type*>(ptr);
+    return type_ptr;
+  }
+
+  void Set(Type* ptr) {
+    void* void_ptr = static_cast<void*>(ptr);
+    SbThreadSetLocalValue(slot_, void_ptr);
+  }
+
+ private:
+  SbThreadLocalKey slot_;
+  SB_DISALLOW_COPY_AND_ASSIGN(ThreadLocalPointer<Type>);
+};
+
// A thread-local boolean with a configurable default value. Built on
// ThreadLocalPointer<void>, which can only distinguish NULL from non-NULL:
// the slot stores whether the current value DIFFERS from the default (an
// XOR encoding), so a slot that was never Set() (NULL) reads back as the
// default value.
class ThreadLocalBoolean {
 public:
  ThreadLocalBoolean() : default_value_(false) {}
  explicit ThreadLocalBoolean(bool default_value)
      : default_value_(default_value) {}
  ~ThreadLocalBoolean() {}

  // Returns the calling thread's value, or default_value_ if never Set().
  bool Get() const {
    bool val = tlp_.Get() != NULL;  // True iff value differs from default.
    return val ^ default_value_;
  }

  void Set(bool val) {
    val = val ^ default_value_;  // Store only the difference from default.
    tlp_.Set(val ? TruePointer() : FalsePointer());
  }

 private:
  static void* TruePointer() { return reinterpret_cast<void*>(0x1); }
  static void* FalsePointer() { return NULL; }
  ThreadLocalPointer<void> tlp_;
  const bool default_value_;

  SB_DISALLOW_COPY_AND_ASSIGN(ThreadLocalBoolean);
};
+
+// An AllocationGroup is a collection of allocations that are logically lumped
+// together, such as "Javascript" or "Graphics".
+class AllocationGroup {
+ public:
+  AllocationGroup(const std::string& name);
+  ~AllocationGroup();
+  const std::string& name() const { return name_; }
+
+  void AddAllocation(int64_t num_bytes);
+  void GetAggregateStats(int32_t* num_allocs, int64_t* allocation_bytes) const;
+
+  int64_t allocation_bytes() const;
+  int32_t num_allocations() const;
+
+ private:
+  const std::string name_;
+  nb::atomic_int64_t allocation_bytes_;
+  nb::atomic_int32_t num_allocations_;
+};
+
// A self locking data structure that maps strings -> AllocationGroups. This
// is used to resolve MemoryGroup names (e.g. "Javascript") to an
// AllocationGroup which can be used to group allocations together. The map
// owns its AllocationGroup values and deletes them in the destructor.
class AtomicStringAllocationGroupMap {
 public:
  AtomicStringAllocationGroupMap();
  ~AtomicStringAllocationGroupMap();

  // Returns the group for |name|, creating (and taking ownership of) it on
  // first use. Thread safe.
  AllocationGroup* Ensure(const std::string& name);
  // Returns the built-in "Unaccounted" group created in the constructor.
  AllocationGroup* GetDefaultUnaccounted();

  // Removes the entry for |key|; returns false if not found. The removed
  // group is not deleted here (see the implementation note).
  bool Erase(const std::string& key);
  // Appends every owned group to |output|; pointers remain owned by the map.
  void GetAll(std::vector<const AllocationGroup*>* output) const;

 private:
  typedef std::map<std::string, AllocationGroup*> Map;
  Map group_map_;                       // Owns the AllocationGroup values.
  AllocationGroup* unaccounted_group_;  // Owned via group_map_.
  mutable starboard::Mutex mutex_;      // Guards group_map_.
};
+
// A stack of AllocationGroup pointers. Instances are kept per thread via TLS
// (see MemoryTrackerImpl) so allocations can be attributed to the innermost
// active group; no internal locking is performed.
class AllocationGroupStack {
 public:
  AllocationGroupStack() { Push_DebugBreak(NULL); }
  ~AllocationGroupStack() {}

  void Push(AllocationGroup* group);
  // Precondition: the stack is non-empty.
  void Pop();
  // Returns the innermost group, or NULL if the stack is empty.
  AllocationGroup* Peek();

  // Parallel stack for debug-break bookkeeping; seeded with a NULL sentinel
  // in the constructor.
  void Push_DebugBreak(AllocationGroup* ag) { debug_stack_.push_back(ag); }
  void Pop_DebugBreak() { debug_stack_.pop_back(); }
  AllocationGroup* Peek_DebugBreak() {
    if (debug_stack_.empty()) {
      return NULL;
    }
    return debug_stack_.back();
  }

 private:
  SB_DISALLOW_COPY_AND_ASSIGN(AllocationGroupStack);
  typedef std::vector<AllocationGroup*> AllocationGroupPtrVec;
  AllocationGroupPtrVec alloc_group_stack_, debug_stack_;
};
+
// A per-pointer map of allocations to AllocationRecords. This map is thread
// safe: every operation takes the internal mutex.
class AtomicAllocationMap {
 public:
  AtomicAllocationMap();
  ~AtomicAllocationMap();

  // Returns true if Added. Otherwise false means that the pointer
  // already existed.
  bool Add(const void* memory, const AllocationRecord& alloc_record);

  // Returns true if the memory exists in this set. On a miss, *alloc_record
  // (when non-NULL) is reset to the empty record.
  bool Get(const void* memory, AllocationRecord* alloc_record) const;

  // Return true if the memory existed in this set. If true
  // then output alloc_record is written with record that was found.
  // otherwise the record is written as 0 bytes and null key.
  bool Remove(const void* memory, AllocationRecord* alloc_record);

  // Invokes |visitor| on each entry while the internal lock is held; returns
  // false if the visitor aborted the traversal.
  bool Accept(AllocationVisitor* visitor) const;

  size_t Size() const;
  bool Empty() const;
  // Removes all entries, crediting each entry's size back to its group.
  void Clear();

 private:
  SB_DISALLOW_COPY_AND_ASSIGN(AtomicAllocationMap);
  typedef std::map<const void*, AllocationRecord> PointerMap;

  PointerMap pointer_map_;
  mutable starboard::Mutex mutex_;  // Guards pointer_map_.
};
+
// A per-pointer map of allocations to AllocationRecords. This is a hybrid
// data structure consisting of a hashtable of maps. Each pointer that is
// stored or retrieved is hashed to a bucket. Each bucket has its own lock.
// This distributed pattern increases performance significantly by reducing
// contention. The top-level hashtable is of constant size and does not
// resize. Each bucket is implemented as its own map of elements.
class ConcurrentAllocationMap {
 public:
  // Number of independent buckets; each bucket carries its own lock.
  static const int kNumElements = 511;
  ConcurrentAllocationMap();
  ~ConcurrentAllocationMap();

  // Returns true if Added. Otherwise false means that the pointer
  // already existed.
  bool Add(const void* memory, const AllocationRecord& alloc_record);
  // Returns true if the memory exists in this set.
  bool Get(const void* memory, AllocationRecord* alloc_record) const;
  // Return true if the memory existed in this set. If true
  // then output alloc_record is written with record that was found.
  // otherwise the record is written as 0 bytes and null key.
  bool Remove(const void* memory, AllocationRecord* alloc_record);
  // Total element count summed across buckets, one lock at a time, so the
  // result is only a snapshot under concurrent mutation.
  size_t Size() const;
  bool Empty() const;
  void Clear();

  // Provides access to all the allocations within in a thread safe manner.
  bool Accept(AllocationVisitor* visitor) const;

  // Returns the bucket responsible for |ptr| (stable for a given pointer).
  AtomicAllocationMap& GetMapForPointer(const void* ptr);
  const AtomicAllocationMap& GetMapForPointer(const void* ptr) const;

 private:
  SB_DISALLOW_COPY_AND_ASSIGN(ConcurrentAllocationMap);
  // Takes a pointer and generates a hash.
  static size_t hash_ptr(const void* ptr);
  // Maps |ptr|'s hash onto [0, kNumElements).
  int ToIndex(const void* ptr) const;
  AtomicAllocationMap pointer_map_array_[kNumElements];
};
+
+class SimpleThread {
+ public:
+  explicit SimpleThread(const std::string& name);
+  virtual ~SimpleThread() = 0;
+
+  // Subclasses should override the Run method.
+  virtual void Run() = 0;
+
+  void Join() {
+    Cancel();
+    DoJoin();
+  }
+
+  // If Join() is intended to interrupt the Run() function then override
+  // Cancel() to send a signal.
+  // Example:
+  //   virtual void Cancel() { finished_ = true; }
+  //   virtual void Run() {
+  //     while (!finished_) { /* do work */ }
+  //   }
+  virtual void Cancel() {}
+
+  // Calls SbThreadCreate() and starts running code.
+  void Start();
+
+ private:
+  static void* ThreadEntryPoint(void* context);
+  void DoJoin();
+  void DoStart();
+
+  const std::string name_;
+  SbThread thread_;
+
+  SB_DISALLOW_COPY_AND_ASSIGN(SimpleThread);
+};
+
+}  // namespace analytics
+}  // namespace nb
+
+#endif  // NB_MEMORY_TRACKER_HELPERS_H_
diff --git a/src/nb/analytics/memory_tracker_helpers_test.cc b/src/nb/analytics/memory_tracker_helpers_test.cc
new file mode 100644
index 0000000..ebef340
--- /dev/null
+++ b/src/nb/analytics/memory_tracker_helpers_test.cc
@@ -0,0 +1,124 @@
+/*
+ * Copyright 2016 Google Inc. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "nb/analytics/memory_tracker_helpers.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace nb {
+namespace analytics {
+namespace {
+
///////////////////////////////////////////////////////////////////////////////
// Ensure() must be idempotent: the same name always resolves to the same
// AllocationGroup instance.
TEST(AtomicStringAllocationGroupMap, Use) {
  AtomicStringAllocationGroupMap map;
  AllocationGroup* tag = map.Ensure("MemoryRegion");
  EXPECT_TRUE(tag != NULL);
  EXPECT_EQ(std::string("MemoryRegion"), tag->name());
  AllocationGroup* tag2 = map.Ensure("MemoryRegion");
  EXPECT_EQ(tag, tag2);
}
+
// Exercises the full Add/Get/Remove/Size/Empty lifecycle of
// AtomicAllocationMap, using the addresses of two stack variables as keys.
TEST(AtomicAllocationMap, AddHasRemove) {
  AtomicAllocationMap atomic_pointer_map;
  int32_t int_a = 0;
  int32_t int_b = 1;

  // Initially empty.
  EXPECT_TRUE(atomic_pointer_map.Empty());

  const AllocationRecord int32_alloc_record =
      AllocationRecord(sizeof(int32_t), NULL);
  AllocationRecord alloc_output;

  // Duplicate Add for the same pointer must fail.
  EXPECT_TRUE(atomic_pointer_map.Add(&int_a, int32_alloc_record));
  EXPECT_FALSE(atomic_pointer_map.Add(&int_a, int32_alloc_record));
  EXPECT_TRUE(atomic_pointer_map.Get(&int_a, &alloc_output));
  EXPECT_EQ(alloc_output.size, sizeof(int32_t));

  // A miss must reset the output record to empty.
  EXPECT_FALSE(atomic_pointer_map.Get(&int_b, &alloc_output));
  EXPECT_EQ(0, alloc_output.size);
  // Adding pointer to int_a increases set to 1 element.
  EXPECT_EQ(atomic_pointer_map.Size(), 1);
  EXPECT_FALSE(atomic_pointer_map.Empty());

  // Adds pointer to int_b.
  EXPECT_TRUE(atomic_pointer_map.Add(&int_b, int32_alloc_record));
  EXPECT_TRUE(atomic_pointer_map.Get(&int_b, &alloc_output));
  EXPECT_EQ(sizeof(int32_t), alloc_output.size);
  // Expect that the second pointer added will increase the number of elements.
  EXPECT_EQ(atomic_pointer_map.Size(), 2);
  EXPECT_FALSE(atomic_pointer_map.Empty());

  // Now remove the elements and ensure that they no longer found and that
  // the size of the table shrinks to empty.
  EXPECT_TRUE(atomic_pointer_map.Remove(&int_a, &alloc_output));
  EXPECT_EQ(sizeof(int32_t), alloc_output.size);
  EXPECT_EQ(atomic_pointer_map.Size(), 1);
  EXPECT_FALSE(atomic_pointer_map.Remove(&int_a, &alloc_output));
  EXPECT_EQ(0, alloc_output.size);
  EXPECT_TRUE(atomic_pointer_map.Remove(&int_b, &alloc_output));
  EXPECT_EQ(atomic_pointer_map.Size(), 0);

  EXPECT_TRUE(atomic_pointer_map.Empty());
}
+
// Same lifecycle coverage as AtomicAllocationMap.AddHasRemove, but driven
// through the bucketed ConcurrentAllocationMap front-end.
TEST(ConcurrentAllocationMap, AddHasRemove) {
  ConcurrentAllocationMap alloc_map;
  int32_t int_a = 0;
  int32_t int_b = 1;

  // Initially empty.
  EXPECT_TRUE(alloc_map.Empty());

  const AllocationRecord int32_alloc_record =
      AllocationRecord(sizeof(int32_t), NULL);
  AllocationRecord alloc_output;

  // Duplicate Add for the same pointer must fail.
  EXPECT_TRUE(alloc_map.Add(&int_a, int32_alloc_record));
  EXPECT_FALSE(alloc_map.Add(&int_a, int32_alloc_record));
  EXPECT_TRUE(alloc_map.Get(&int_a, &alloc_output));
  EXPECT_EQ(alloc_output.size, sizeof(int32_t));

  // A miss must reset the output record to empty.
  EXPECT_FALSE(alloc_map.Get(&int_b, &alloc_output));
  EXPECT_EQ(0, alloc_output.size);
  // Adding pointer to int_a increases set to 1 element.
  EXPECT_EQ(alloc_map.Size(), 1);
  EXPECT_FALSE(alloc_map.Empty());

  // Adds pointer to int_b.
  EXPECT_TRUE(alloc_map.Add(&int_b, int32_alloc_record));
  EXPECT_TRUE(alloc_map.Get(&int_b, &alloc_output));
  EXPECT_EQ(sizeof(int32_t), alloc_output.size);
  // Expect that the second pointer added will increase the number of elements.
  EXPECT_EQ(alloc_map.Size(), 2);
  EXPECT_FALSE(alloc_map.Empty());

  // Now remove the elements and ensure that they no longer found and that
  // the size of the table shrinks to empty.
  EXPECT_TRUE(alloc_map.Remove(&int_a, &alloc_output));
  EXPECT_EQ(sizeof(int32_t), alloc_output.size);
  EXPECT_EQ(alloc_map.Size(), 1);
  EXPECT_FALSE(alloc_map.Remove(&int_a, &alloc_output));
  EXPECT_EQ(0, alloc_output.size);
  EXPECT_TRUE(alloc_map.Remove(&int_b, &alloc_output));
  EXPECT_EQ(alloc_map.Size(), 0);

  EXPECT_TRUE(alloc_map.Empty());
}
+
+}  // namespace
+}  // namespace analytics
+}  // namespace nb
diff --git a/src/nb/analytics/memory_tracker_impl.cc b/src/nb/analytics/memory_tracker_impl.cc
new file mode 100644
index 0000000..fecda24
--- /dev/null
+++ b/src/nb/analytics/memory_tracker_impl.cc
@@ -0,0 +1,541 @@
+/*
+ * Copyright 2016 Google Inc. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
#include "nb/analytics/memory_tracker_impl.h"

#include <algorithm>
#include <cctype>
#include <iomanip>
#include <sstream>
#include <string>
#include <vector>

#include "nb/atomic.h"
#include "starboard/atomic.h"
+
+namespace nb {
+namespace analytics {
+
// Returns the SbMemoryReporter adapter whose callbacks feed this tracker.
SbMemoryReporter* MemoryTrackerImpl::GetMemoryReporter() {
  return &sb_memory_tracker_;
}
+
// Returns the NbMemoryScopeReporter adapter whose callbacks feed this tracker.
NbMemoryScopeReporter* MemoryTrackerImpl::GetMemoryScopeReporter() {
  return &nb_memory_scope_reporter_;
}
+
// Returns the net number of bytes currently tracked (allocations minus
// deallocations), read atomically.
int64_t MemoryTrackerImpl::GetTotalAllocationBytes() {
  return total_bytes_allocated_.load();
}
+
+AllocationGroup* MemoryTrackerImpl::GetAllocationGroup(const char* name) {
+  DisableMemoryTrackingInScope no_tracking(this);
+  AllocationGroup* alloc_group = alloc_group_map_.Ensure(name);
+  return alloc_group;
+}
+
// Resolves |group_name| to its AllocationGroup (creating it if needed) and
// pushes it on the current thread's group stack.
void MemoryTrackerImpl::PushAllocationGroupByName(const char* group_name) {
  AllocationGroup* group = GetAllocationGroup(group_name);
  PushAllocationGroup(group);
}
+
// Pushes |alloc_group| onto this thread's group stack; subsequent
// allocations on this thread are attributed to it. NULL is mapped to the
// "default unaccounted" group so the stack never holds NULL.
void MemoryTrackerImpl::PushAllocationGroup(AllocationGroup* alloc_group) {
  if (alloc_group == NULL) {
    alloc_group = alloc_group_map_.GetDefaultUnaccounted();
  }
  DisableMemoryTrackingInScope no_tracking(this);
  allocation_group_stack_tls_.GetOrCreate()->Push(alloc_group);
}
+
+AllocationGroup* MemoryTrackerImpl::PeekAllocationGroup() {
+  DisableMemoryTrackingInScope no_tracking(this);
+  AllocationGroup* out =
+      allocation_group_stack_tls_.GetOrCreate()->Peek();
+  if (out == NULL) {
+    out = alloc_group_map_.GetDefaultUnaccounted();
+  }
+  return out;
+}
+
// Pops this thread's group stack. If the pop empties the stack, the
// "default unaccounted" group is re-pushed so the top is never NULL.
void MemoryTrackerImpl::PopAllocationGroup() {
  DisableMemoryTrackingInScope no_tracking(this);
  AllocationGroupStack* alloc_tls = allocation_group_stack_tls_.GetOrCreate();
  alloc_tls->Pop();
  AllocationGroup* group = alloc_tls->Peek();
  // We don't allow null, so if this is encountered then push the
  // "default unaccounted" alloc group.
  if (group == NULL) {
    alloc_tls->Push(alloc_group_map_.GetDefaultUnaccounted());
  }
}
+
// Copies pointers to all known allocation groups into |output|. Locking
// is handled inside the group map; the reserve(100) is only a growth hint.
void MemoryTrackerImpl::GetAllocationGroups(
    std::vector<const AllocationGroup*>* output) {
  DisableMemoryTrackingInScope no_allocation_tracking(this);
  output->reserve(100);
  alloc_group_map_.GetAll(output);
}
+
// Same as the vector overload, but keys the result by group name.
// |output| is cleared first; any prior contents are discarded.
void MemoryTrackerImpl::GetAllocationGroups(
    std::map<std::string, const AllocationGroup*>* output) {
  output->clear();
  DisableMemoryTrackingInScope no_tracking(this);
  std::vector<const AllocationGroup*> tmp;
  GetAllocationGroups(&tmp);
  for (size_t i = 0; i < tmp.size(); ++i) {
    output->insert(std::make_pair(tmp[i]->name(), tmp[i]));
  }
}
+
// Visits every live allocation record with |visitor|. Tracking is disabled
// on this thread for the duration so the visitor's own allocations are not
// recorded.
void MemoryTrackerImpl::Accept(AllocationVisitor* visitor) {
  DisableMemoryTrackingInScope no_mem_tracking(this);
  atomic_allocation_map_.Accept(visitor);
}
+
// Resets the tracker: drops all allocation records and zeroes the byte
// counter. NOTE(review): the comment below mentions re-entrant
// deallocation, but no DisableDeletionInScope guard is taken here — this
// presumably relies on ConcurrentAllocationMap::Clear() being safe
// internally; confirm.
void MemoryTrackerImpl::Clear() {
  // Prevent clearing of the tree from triggering a re-entrant
  // memory deallocation.
  atomic_allocation_map_.Clear();
  total_bytes_allocated_.store(0);
}
+
// Debug helper: resolves |group_name| (creating the group if needed) and
// installs it as this thread's break-point group.
void MemoryTrackerImpl::Debug_PushAllocationGroupBreakPointByName(
    const char* group_name) {
  DisableMemoryTrackingInScope no_tracking(this);
  SB_DCHECK(group_name != NULL);
  AllocationGroup* group = alloc_group_map_.Ensure(group_name);
  Debug_PushAllocationGroupBreakPoint(group);
}
+
// Pushes |alloc_group| onto this thread's debug-break stack (consumed in
// AddMemoryTracking's NDEBUG-only breakpoint hook).
void MemoryTrackerImpl::Debug_PushAllocationGroupBreakPoint(
    AllocationGroup* alloc_group) {
  DisableMemoryTrackingInScope no_tracking(this);
  allocation_group_stack_tls_.GetOrCreate()->Push_DebugBreak(alloc_group);
}
+
// Removes the top entry from this thread's debug-break stack.
void MemoryTrackerImpl::Debug_PopAllocationGroupBreakPoint() {
  DisableMemoryTrackingInScope no_tracking(this);
  allocation_group_stack_tls_.GetOrCreate()->Pop_DebugBreak();
}
+
// Inserts thousands separators into the integer part of a number string.
// E.g. "2345.54" => "2,345.54", "-1234567" => "-1,234,567".
//
// Fixes a defect in the previous reverse-and-count implementation, which
// counted a leading '-' as a digit and emitted a comma before it whenever
// the digit count was a multiple of three ("-234567" -> "-,234,567").
std::string InsertCommasIntoNumberString(const std::string& input) {
  // Split off the fractional part (if any); commas only belong in the
  // integer part.
  const std::string::size_type dot_pos = input.find('.');
  const std::string integer_part =
      (dot_pos == std::string::npos) ? input : input.substr(0, dot_pos);
  const std::string fraction_part =
      (dot_pos == std::string::npos) ? std::string() : input.substr(dot_pos);

  // Skip any leading non-digit characters (e.g. a sign) so they never get
  // a comma placed after them.
  std::string::size_type digits_begin = 0;
  while (digits_begin < integer_part.size() &&
         !isdigit(static_cast<unsigned char>(integer_part[digits_begin]))) {
    ++digits_begin;
  }

  const std::string::size_type num_digits =
      integer_part.size() - digits_begin;
  std::string with_commas;
  for (std::string::size_type i = 0; i < num_digits; ++i) {
    // A comma goes before this digit when the remaining digit count
    // (including this one) is a positive multiple of three.
    if (i != 0 && ((num_digits - i) % 3 == 0)) {
      with_commas.push_back(',');
    }
    with_commas.push_back(integer_part[digits_begin + i]);
  }
  return integer_part.substr(0, digits_begin) + with_commas + fraction_part;
}
+
+template <typename T>
+std::string NumberFormatWithCommas(T val) {
+  // Convert value to string.
+  std::stringstream ss;
+  ss << val;
+  std::string s = InsertCommasIntoNumberString(ss.str());
+  return s;
+}
+
// SbMemoryReporter allocation callback. |context| is the MemoryTrackerImpl*
// registered in Initialize().
void MemoryTrackerImpl::OnMalloc(void* context,
                                 const void* memory,
                                 size_t size) {
  MemoryTrackerImpl* t = static_cast<MemoryTrackerImpl*>(context);
  t->AddMemoryTracking(memory, size);
}
+
// SbMemoryReporter deallocation callback; removes the record for |memory|.
void MemoryTrackerImpl::OnDealloc(void* context, const void* memory) {
  MemoryTrackerImpl* t = static_cast<MemoryTrackerImpl*>(context);
  t->RemoveMemoryTracking(memory);
}
+
// Memory-map callback; currently treated identically to a malloc.
void MemoryTrackerImpl::OnMapMem(void* context,
                                 const void* memory,
                                 size_t size) {
  // We might do something more interesting with MapMemory calls later.
  OnMalloc(context, memory, size);
}
+
// Memory-unmap callback; treated identically to a dealloc. |size| is
// ignored because the tracked record already knows the region's size.
void MemoryTrackerImpl::OnUnMapMem(void* context,
                                   const void* memory,
                                   size_t size) {
  // We might do something more interesting with UnMapMemory calls later.
  OnDealloc(context, memory);
}
+
+void MemoryTrackerImpl::OnPushAllocationGroup(
+    void* context,
+    NbMemoryScopeInfo* memory_scope_info) {
+  MemoryTrackerImpl* t = static_cast<MemoryTrackerImpl*>(context);
+  uintptr_t* cached_handle = &(memory_scope_info->cached_handle_);
+  const bool allows_caching = memory_scope_info->allows_caching_;
+  const char* group_name = memory_scope_info->memory_scope_name_;
+
+  AllocationGroup* group = NULL;
+  if (allows_caching && *cached_handle != 0) {
+    group = reinterpret_cast<AllocationGroup*>(cached_handle);
+  } else {
+    group = t->GetAllocationGroup(group_name);
+    if (allows_caching) {
+      // Flush all pending writes so that the the pointee is well formed
+      // by the time the pointer becomes visible to other threads.
+      SbAtomicMemoryBarrier();
+      *cached_handle = reinterpret_cast<uintptr_t>(group);
+    }
+  }
+
+  t->PushAllocationGroup(group);
+}
+
// NbMemoryScope pop callback; undoes the matching OnPushAllocationGroup.
void MemoryTrackerImpl::OnPopAllocationGroup(void* context) {
  MemoryTrackerImpl* t = static_cast<MemoryTrackerImpl*>(context);
  t->PopAllocationGroup();
}
+
// Fills both reporter structs with this class's static callbacks, using
// |this| as the context pointer. Called once from the constructor.
void MemoryTrackerImpl::Initialize(
    SbMemoryReporter* sb_memory_reporter,
    NbMemoryScopeReporter* memory_scope_reporter) {
  SbMemoryReporter mem_reporter = {
      MemoryTrackerImpl::OnMalloc, MemoryTrackerImpl::OnDealloc,

      MemoryTrackerImpl::OnMapMem, MemoryTrackerImpl::OnUnMapMem,

      this};

  NbMemoryScopeReporter mem_scope_reporter = {
      MemoryTrackerImpl::OnPushAllocationGroup,
      MemoryTrackerImpl::OnPopAllocationGroup,

      this,
  };

  *sb_memory_reporter = mem_reporter;
  *memory_scope_reporter = mem_scope_reporter;
}
+
// Restricts reporting to thread |tid|; pass kSbThreadInvalidId (the
// default) to allow all threads. Intended for tests.
void MemoryTrackerImpl::SetThreadFilter(SbThreadId tid) {
  thread_filter_id_ = tid;
}
+
+bool MemoryTrackerImpl::IsCurrentThreadAllowedToReport() const {
+  if (thread_filter_id_ == kSbThreadInvalidId) {
+    return true;
+  }
+  return SbThreadGetId() == thread_filter_id_;
+}
+
// Saves the current deletion-tracking state and disables it until the
// scope ends (see destructor).
MemoryTrackerImpl::DisableDeletionInScope::DisableDeletionInScope(
    MemoryTrackerImpl* owner)
    : owner_(owner) {
  prev_state_ = owner->MemoryDeletionEnabled();
  owner_->SetMemoryDeletionEnabled(false);
}
+
// Restores the deletion-tracking state saved by the constructor.
MemoryTrackerImpl::DisableDeletionInScope::~DisableDeletionInScope() {
  owner_->SetMemoryDeletionEnabled(prev_state_);
}
+
+// TODO: Get rid of the nb::SimpleThread
+class MemoryTrackerPrintThread : public SimpleThread {
+ public:
+  MemoryTrackerPrintThread(MemoryTracker* owner)
+      : SimpleThread("MemoryTrackerPrintThread"),
+        finished_(false),
+        owner_(owner) {}
+
+  // Overridden so that the thread can exit gracefully.
+  virtual void Cancel() SB_OVERRIDE { finished_.store(true); }
+
+  virtual void Run() {
+    struct NoMemTracking {
+      NoMemTracking(MemoryTracker* owner) : owner_(owner) {
+        prev_val_ = owner_->IsMemoryTrackingEnabled();
+        owner_->SetMemoryTrackingEnabled(false);
+      }
+      ~NoMemTracking() { owner_->SetMemoryTrackingEnabled(prev_val_); }
+
+      bool prev_val_;
+      MemoryTracker* owner_;
+    };
+
+    while (!finished_.load()) {
+      NoMemTracking no_mem_tracking_in_this_scope(owner_);
+
+      // std::map<std::string, const AllocationGroup*> output;
+      // typedef std::map<std::string, const AllocationGroup*>::const_iterator
+      // MapIt;
+      std::vector<const AllocationGroup*> vector_output;
+      owner_->GetAllocationGroups(&vector_output);
+
+      typedef std::map<std::string, const AllocationGroup*> Map;
+      typedef Map::const_iterator MapIt;
+
+      Map output;
+      for (int i = 0; i < vector_output.size(); ++i) {
+        const AllocationGroup* group = vector_output[i];
+        output[group->name()] = group;
+      }
+
+      int32_t num_allocs = 0;
+      int64_t total_bytes = 0;
+
+      struct F {
+        static void PrintRow(std::stringstream& ss,
+                             const std::string& v1,
+                             const std::string& v2,
+                             const std::string& v3) {
+          ss.width(20);
+          ss << std::left << v1;
+          ss.width(13);
+          ss << std::right << v2 << "  ";
+          ss.width(7);
+          ss << std::right << v3 << "\n";
+        }
+      };
+
+      if (owner_->IsMemoryTrackingEnabled()) {
+        // If this isn't true then it would cause an infinite loop. The
+        // following will likely crash.
+        SB_DCHECK(false) << "Unexpected, memory tracking should be disabled.";
+      }
+
+      std::stringstream ss;
+      for (MapIt it = output.begin(); it != output.end(); ++it) {
+        const AllocationGroup* group = it->second;
+        if (!group) {
+          continue;
+        }
+
+        int32_t num_group_allocs = -1;
+        int64_t total_group_bytes = -1;
+
+        group->GetAggregateStats(&num_group_allocs, &total_group_bytes);
+        SB_DCHECK(-1 != num_group_allocs);
+        SB_DCHECK(-1 != total_group_bytes);
+        num_allocs += num_group_allocs;
+        total_bytes += total_group_bytes;
+
+        ss.width(20);
+        ss << std::left << it->first;
+        ss.width(13);
+        ss << std::right << NumberFormatWithCommas(total_group_bytes) << "  ";
+        ss.width(7);
+        ss << std::right << NumberFormatWithCommas(num_group_allocs) << "\n";
+      }
+      ss << "-------------------------------\n";
+
+      SB_LOG(INFO) << "\n"
+                   << "Total Bytes Allocated: "
+                   << NumberFormatWithCommas(total_bytes) << "\n"
+                   << "Total allocations: "
+                   << NumberFormatWithCommas(num_allocs) << "\n\n" << ss.str();
+
+      SbThreadSleep(250);
+    }
+  }
+
+ private:
+  atomic_bool finished_;
+  MemoryTracker* owner_;
+};
+
+void MemoryTrackerImpl::Debug_EnablePrintOutThread() {
+  if (debug_output_thread_) {
+    return;
+  }  // Already enabled.
+  debug_output_thread_.reset(new MemoryTrackerPrintThread(this));
+  debug_output_thread_->Start();
+}
+
+MemoryTrackerImpl::MemoryTrackerImpl()
+    : thread_filter_id_(kSbThreadInvalidId) {
+  total_bytes_allocated_.store(0);
+  global_hooks_installed_ = false;
+  Initialize(&sb_memory_tracker_, &nb_memory_scope_reporter_);
+  // Push the default region so that stats can be accounted for.
+  PushAllocationGroup(alloc_group_map_.GetDefaultUnaccounted());
+}
+
+MemoryTrackerImpl::~MemoryTrackerImpl() {
+  // If we are currently hooked into allocation tracking...
+  if (global_hooks_installed_) {
+    SbMemorySetReporter(NULL);
+    // For performance reasons no locking is used on the tracker.
+    // Therefore give enough time for other threads to exit this tracker
+    // before fully destroying this object.
+    SbThreadSleep(250 * kSbTimeMillisecond);  // 250 millisecond wait.
+  }
+  if (debug_output_thread_) {
+    debug_output_thread_->Join();
+    debug_output_thread_.reset();
+  }
+}
+
// Records a new allocation of |size| bytes at |memory|, attributing it to
// the AllocationGroup on top of this thread's group stack (or the
// "default unaccounted" group when the stack is empty). Returns true if a
// new record was added; false when tracking is disabled, the request is
// degenerate (NULL / zero-size), or the calling thread is filtered out.
bool MemoryTrackerImpl::AddMemoryTracking(const void* memory, size_t size) {
  // Vars are stored to assist in debugging.
  const bool thread_allowed_to_report = IsCurrentThreadAllowedToReport();
  const bool valid_memory_request = (memory != NULL) && (size != 0);
  const bool mem_track_enabled = IsMemoryTrackingEnabled();

  const bool tracking_enabled =
      mem_track_enabled && valid_memory_request && thread_allowed_to_report;

  if (!tracking_enabled) {
    return false;
  }

  // End all memory tracking in subsequent data structures.
  DisableMemoryTrackingInScope no_memory_tracking(this);
  AllocationGroupStack* alloc_stack =
      allocation_group_stack_tls_.GetOrCreate();
  AllocationGroup* group = alloc_stack->Peek();
  if (!group) {
    group = alloc_group_map_.GetDefaultUnaccounted();
  }

#ifndef NDEBUG
  // This section of the code is designed to allow a developer to break
  // execution whenever the debug allocation stack is in scope, so that the
  // allocations can be stepped through.
  // Example:
  //   Debug_PushAllocationGroupBreakPointByName("Javascript");
  //  ...now set a break point below at "static int i = 0"
  if (group && (group == alloc_stack->Peek_DebugBreak()) &&
      alloc_stack->Peek_DebugBreak()) {
    static int i = 0;  // This static is here to allow an
    ++i;               // easy breakpoint in the debugger
  }
#endif

  AllocationRecord alloc_record(size, group);
  bool added = atomic_allocation_map_.Add(memory, alloc_record);
  if (added) {
    AddAllocationBytes(size);
    group->AddAllocation(size);
  } else {
    // Double-add: this pointer is already tracked, meaning its previous
    // deallocation was missed. Fetch the stale record purely for the
    // diagnostic message below.
    AllocationRecord unexpected_alloc;
    atomic_allocation_map_.Get(memory, &unexpected_alloc);
    AllocationGroup* prev_group = unexpected_alloc.allocation_group;

    std::string prev_group_name;
    if (prev_group) {
      prev_group_name = unexpected_alloc.allocation_group->name();
    } else {
      prev_group_name = "none";
    }

    // |added| is known false here, so this SB_DCHECK always fires; it is
    // being used as a formatted assertion failure.
    SB_DCHECK(added)
        << "\nUnexpected condition, previous allocation was not removed:\n"
        << "\tprevious alloc group: " << prev_group_name << "\n"
        << "\tnew alloc group: " << group->name() << "\n"
        << "\tprevious size: " << unexpected_alloc.size << "\n"
        << "\tnew size: " << size << "\n";
  }
  return added;
}
+
// Removes the record for |memory| and reverses its contribution to the
// group and global byte counters. Returns the size of the removed
// allocation, or 0 when nothing was tracked (or deletion tracking is
// disabled for this thread).
size_t MemoryTrackerImpl::RemoveMemoryTracking(const void* memory) {
  const bool do_remove = memory && MemoryDeletionEnabled();
  if (!do_remove) {
    return 0;
  }

  AllocationRecord alloc_record;
  bool removed = false;

  // Prevent a map::erase() from causing an endless stack overflow by
  // disabling memory deletion for the very limited scope.
  {
    // Guard against re-entrant RemoveMemoryTracking while the map erases.
    DisableDeletionInScope no_memory_deletion(this);
    removed = atomic_allocation_map_.Remove(memory, &alloc_record);
  }

  if (!removed) {
    return 0;
  } else {
    const int64_t alloc_size = (static_cast<int64_t>(alloc_record.size));
    AllocationGroup* group = alloc_record.allocation_group;
    if (group) {
      group->AddAllocation(-alloc_size);
    }
    AddAllocationBytes(-alloc_size);
    return alloc_record.size;
  }
}
+
+bool MemoryTrackerImpl::GetMemoryTracking(const void* memory,
+                                          AllocationRecord* record) const {
+  const bool exists = atomic_allocation_map_.Get(memory, record);
+  return exists;
+}
+
// Thread-local toggle. The TLS slot stores the *disabled* flag so that the
// zero-initialized default means "enabled".
void MemoryTrackerImpl::SetMemoryTrackingEnabled(bool on) {
  memory_tracking_disabled_tls_.Set(!on);
}
+
+bool MemoryTrackerImpl::IsMemoryTrackingEnabled() const {
+  const bool enabled = !memory_tracking_disabled_tls_.Get();
+  return enabled;
+}
+
// Atomically adjusts the global byte counter; |val| may be negative for
// deallocations.
void MemoryTrackerImpl::AddAllocationBytes(int64_t val) {
  total_bytes_allocated_.fetch_add(val);
}
+
// Thread-local query; TLS stores the inverted flag so the default is
// "enabled".
bool MemoryTrackerImpl::MemoryDeletionEnabled() const {
  return !memory_deletion_enabled_tls_.Get();
}
+
// Thread-local toggle for deletion tracking (see MemoryDeletionEnabled).
void MemoryTrackerImpl::SetMemoryDeletionEnabled(bool on) {
  memory_deletion_enabled_tls_.Set(!on);
}
+
// Saves the current tracking state and disables allocation tracking on
// this thread until the scope ends (see destructor).
MemoryTrackerImpl::DisableMemoryTrackingInScope::DisableMemoryTrackingInScope(
    MemoryTrackerImpl* t)
    : owner_(t) {
  prev_value_ = owner_->IsMemoryTrackingEnabled();
  owner_->SetMemoryTrackingEnabled(false);
}
+
// Restores the tracking state saved by the constructor.
MemoryTrackerImpl::DisableMemoryTrackingInScope::
    ~DisableMemoryTrackingInScope() {
  owner_->SetMemoryTrackingEnabled(prev_value_);
}
+
+}  // namespace analytics
+}  // namespace nb
diff --git a/src/nb/analytics/memory_tracker_impl.h b/src/nb/analytics/memory_tracker_impl.h
new file mode 100644
index 0000000..7508a71
--- /dev/null
+++ b/src/nb/analytics/memory_tracker_impl.h
@@ -0,0 +1,184 @@
+/*
+ * Copyright 2016 Google Inc. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef NB_MEMORY_TRACKER_IMPL_H_
+#define NB_MEMORY_TRACKER_IMPL_H_
+
+#include "nb/analytics/memory_tracker_helpers.h"
+#include "nb/analytics/memory_tracker.h"
+#include "nb/memory_scope.h"
+#include "nb/scoped_ptr.h"
+#include "nb/thread_local_object.h"
+#include "starboard/configuration.h"
+#include "starboard/memory_reporter.h"
+#include "starboard/memory.h"
+#include "starboard/mutex.h"
+
+namespace nb {
+namespace analytics {
+
// Concrete MemoryTracker that records every allocation / deallocation
// reported through the Starboard SbMemoryReporter and NbMemoryScope hooks,
// attributing each allocation to the AllocationGroup on top of the
// reporting thread's group stack. Thread-local state keeps the tracker's
// own bookkeeping allocations from being recorded re-entrantly.
class MemoryTrackerImpl : public MemoryTracker {
 public:
  typedef ConcurrentAllocationMap AllocationMapType;

  MemoryTrackerImpl();
  virtual ~MemoryTrackerImpl();

  // MemoryTracker adapter which is compatible with the SbMemoryReporter
  // interface.
  SbMemoryReporter* GetMemoryReporter();
  NbMemoryScopeReporter* GetMemoryScopeReporter();

  AllocationGroup* GetAllocationGroup(const char* name);
  // Declares the start of a memory region. After this call, all
  // memory regions will be tagged with this allocation group.
  // Note that AllocationGroup is a tracking characteristic and
  // does not imply any sort of special allocation pool.
  void PushAllocationGroupByName(const char* group_name);
  void PushAllocationGroup(AllocationGroup* alloc_group);
  AllocationGroup* PeekAllocationGroup();
  // Ends the current memory region and the previous memory region
  // is restored.
  void PopAllocationGroup();

  // CONTROL
  //
  // Adds tracking to the supplied memory pointer. An AllocationRecord is
  // generated for the supplied allocation which can be queried immediately
  // with GetMemoryTracking(...).
  bool InstallGlobalTrackingHooks() SB_OVERRIDE {
    global_hooks_installed_ = true;
    bool ok = SbMemorySetReporter(GetMemoryReporter());
    // NOTE(review): |= makes this report success when *either* hook
    // installed; confirm whether callers require both (i.e. &&).
    ok |= NbSetMemoryScopeReporter(GetMemoryScopeReporter());
    return ok;
  }
  void RemoveGlobalTrackingHooks() SB_OVERRIDE {
    SbMemorySetReporter(NULL);
    NbSetMemoryScopeReporter(NULL);
  }

  bool AddMemoryTracking(const void* memory, size_t size) SB_OVERRIDE;
  size_t RemoveMemoryTracking(const void* memory) SB_OVERRIDE;
  // Returns true if the allocation record was successfully found.
  // If true then the output will be written to with the values.
  // Otherwise the output is reset to the empty AllocationRecord.
  bool GetMemoryTracking(const void* memory,
                         AllocationRecord* record) const SB_OVERRIDE;
  // Thread local function to get and set the memory tracking state. When set
  // to disabled then memory allocations are not recorded. However memory
  // deletions are still recorded.
  void SetMemoryTrackingEnabled(bool on) SB_OVERRIDE;
  bool IsMemoryTrackingEnabled() const SB_OVERRIDE;

  // REPORTING
  //
  // Total allocation bytes that have been allocated by this
  // MemoryTrackerImpl.
  int64_t GetTotalAllocationBytes() SB_OVERRIDE;
  // Retrieves a collection of all known allocation groups. Locking is done
  // internally.
  void GetAllocationGroups(std::vector<const AllocationGroup*>* output)
      SB_OVERRIDE;
  // Retrieves a collection of all known allocation groups. Locking is done
  // internally. The output is a map of names to AllocationGroups.
  void GetAllocationGroups(
      std::map<std::string, const AllocationGroup*>* output);

  // Provides access to the internal allocations in a thread safe way.
  // Allocation tracking is disabled in the current thread for the duration
  // of the visitation.
  void Accept(AllocationVisitor* visitor) SB_OVERRIDE;

  int64_t GetTotalNumberOfAllocations() SB_OVERRIDE {
    return pointer_map()->Size();
  }

  // TESTING.
  AllocationMapType* pointer_map() { return &atomic_allocation_map_; }
  void Clear();

  // This is useful for debugging. Allows the developer to set a breakpoint
  // and see only allocations that are in the defined allocation group. This
  // is only active in the current thread.
  void Debug_PushAllocationGroupBreakPointByName(const char* group_name);
  void Debug_PushAllocationGroupBreakPoint(AllocationGroup* alloc_group);
  void Debug_PopAllocationGroupBreakPoint();

  // This is useful for testing, setting this to a thread will allow ONLY
  // those allocations from the set thread.
  // Setting this to kSbThreadInvalidId (default) allows all threads to report
  // allocations.
  void SetThreadFilter(SbThreadId tid);
  bool IsCurrentThreadAllowedToReport() const;
  // Spawns a thread who's lifetime is coupled to that of this owning
  // MemoryTrackerImpl. This thread will output the state of the memory
  // periodically.
  void Debug_EnablePrintOutThread();

 private:
  // RAII guard: suppresses allocation tracking on the current thread.
  struct DisableMemoryTrackingInScope {
    DisableMemoryTrackingInScope(MemoryTrackerImpl* t);
    ~DisableMemoryTrackingInScope();
    MemoryTrackerImpl* owner_;
    bool prev_value_;
  };

  // Disables all memory deletion in the current scope. This is used in one
  // location.
  struct DisableDeletionInScope {
    DisableDeletionInScope(MemoryTrackerImpl* owner);
    ~DisableDeletionInScope();
    MemoryTrackerImpl* owner_;
    bool prev_state_;
  };

  // These are functions that are used specifically SbMemoryReporter.
  static void OnMalloc(void* context, const void* memory, size_t size);
  static void OnDealloc(void* context, const void* memory);
  static void OnMapMem(void* context, const void* memory, size_t size);
  static void OnUnMapMem(void* context, const void* memory, size_t size);
  static void OnPushAllocationGroup(void* context,
                                    NbMemoryScopeInfo* memory_scope_info);
  static void OnPopAllocationGroup(void* context);

  void Initialize(SbMemoryReporter* memory_reporter,
                  NbMemoryScopeReporter* nb_memory_scope_reporter);
  void AddAllocationBytes(int64_t val);
  bool MemoryDeletionEnabled() const;

  void SetMemoryDeletionEnabled(bool on);

  SbMemoryReporter sb_memory_tracker_;
  NbMemoryScopeReporter nb_memory_scope_reporter_;
  // Only the matching thread may report; kSbThreadInvalidId = all threads.
  SbThreadId thread_filter_id_;

  AllocationMapType atomic_allocation_map_;
  AtomicStringAllocationGroupMap alloc_group_map_;

  atomic_int64_t total_bytes_allocated_;
  scoped_ptr<SimpleThread> debug_output_thread_;

  // THREAD LOCAL SECTION.
  ThreadLocalBoolean memory_deletion_enabled_tls_;
  ThreadLocalBoolean memory_tracking_disabled_tls_;
  ThreadLocalObject<AllocationGroupStack> allocation_group_stack_tls_;
  bool global_hooks_installed_;
};
+
+}  // namespace analytics
+}  // namespace nb
+
+#endif  // NB_MEMORY_TRACKER_IMPL_H_
diff --git a/src/nb/analytics/memory_tracker_impl_test.cc b/src/nb/analytics/memory_tracker_impl_test.cc
new file mode 100644
index 0000000..8236447
--- /dev/null
+++ b/src/nb/analytics/memory_tracker_impl_test.cc
@@ -0,0 +1,802 @@
+/*
+ * Copyright 2016 Google Inc. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "nb/analytics/memory_tracker_impl.h"
+#include "nb/memory_scope.h"
+#include "nb/scoped_ptr.h"
+#include "nb/test_thread.h"
+#include "starboard/system.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#define STRESS_TEST_DURATION_SECONDS 1
+#define NUM_STRESS_TEST_THREADS 3
+
+namespace nb {
+namespace analytics {
+namespace {
+
+MemoryTrackerImpl* s_memory_tracker_ = NULL;
+
+struct NoMemTracking {
+  bool prev_val;
+  NoMemTracking() : prev_val(false) {
+    if (s_memory_tracker_) {
+      prev_val = s_memory_tracker_->IsMemoryTrackingEnabled();
+      s_memory_tracker_->SetMemoryTrackingEnabled(false);
+    }
+  }
+  ~NoMemTracking() {
+    if (s_memory_tracker_) {
+      s_memory_tracker_->SetMemoryTrackingEnabled(prev_val);
+    }
+  }
+};
+
+// EXPECT_XXX and ASSERT_XXX allocate memory, a big no-no when
+// for unit testing allocations. These overrides disable memory
+// tracking for the duration of the EXPECT and ASSERT operations.
+#define EXPECT_EQ_NO_TRACKING(A, B)                 \
+  {                                                 \
+    NoMemTracking no_memory_tracking_in_this_scope; \
+    EXPECT_EQ(A, B);                                \
+  }
+
+#define EXPECT_TRUE_NO_TRACKING(A)                  \
+  {                                                 \
+    NoMemTracking no_memory_tracking_in_this_scope; \
+    EXPECT_TRUE(A);                                 \
+  }
+
+#define EXPECT_FALSE_NO_TRACKING(A)                 \
+  {                                                 \
+    NoMemTracking no_memory_tracking_in_this_scope; \
+    EXPECT_FALSE(A);                                \
+  }
+
+#define ASSERT_EQ_NO_TRACKING(A, B)                 \
+  {                                                 \
+    NoMemTracking no_memory_tracking_in_this_scope; \
+    ASSERT_EQ(A, B);                                \
+  }
+
+#define ASSERT_TRUE_NO_TRACKING(A)                  \
+  {                                                 \
+    NoMemTracking no_memory_tracking_in_this_scope; \
+    ASSERT_TRUE(A);                                 \
+  }
+
+// !! converts int -> bool.
+bool FlipCoin() {
+  return !!(SbSystemGetRandomUInt64() & 0x1);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// Stress testing the Allocation Tracker.
+class MemoryScopeThread : public nb::TestThread {
+ public:
+  typedef nb::TestThread Super;
+
+  explicit MemoryScopeThread(MemoryTrackerImpl* memory_tracker)
+      : memory_tracker_(memory_tracker) {
+    static int s_counter = 0;
+
+    std::stringstream ss;
+    ss << "MemoryScopeThread_" << s_counter++;
+    unique_name_ = ss.str();
+  }
+  virtual ~MemoryScopeThread() {}
+
+  // Overridden so that the thread can exit gracefully.
+  virtual void Join() {
+    finished_ = true;
+    Super::Join();
+  }
+  virtual void Run() {
+    while (!finished_) {
+      TRACK_MEMORY_SCOPE_DYNAMIC(unique_name_.c_str());
+      AllocationGroup* group = memory_tracker_->PeekAllocationGroup();
+
+      const int cmp_result = group->name().compare(unique_name_);
+      if (cmp_result != 0) {
+        GTEST_FAIL() << "unique name mismatch";
+        return;
+      }
+    }
+  }
+
+ private:
+  MemoryTrackerImpl* memory_tracker_;
+  bool finished_;
+  std::string unique_name_;
+  int do_delete_counter_;
+  int do_malloc_counter_;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// Stress testing the Allocation Tracker.
// Randomly allocates and frees memory while cross-checking the tracker's
// records against a locally maintained map (method bodies defined below).
class AllocationStressThread : public nb::TestThread {
 public:
  explicit AllocationStressThread(MemoryTrackerImpl* memory_tracker);
  virtual ~AllocationStressThread();

  // Overridden so that the thread can exit gracefully.
  virtual void Join();
  virtual void Run();

 private:
  typedef std::map<const void*, AllocationRecord> AllocMap;

  // Cross-checks locally remembered pointers against the tracker's state.
  void CheckPointers();
  // Picks a random entry out of |allocated_pts_|; returns false when empty.
  bool RemoveRandomAllocation(std::pair<const void*, AllocationRecord>* output);
  bool DoDelete();
  void DoMalloc();

  MemoryTrackerImpl* memory_tracker_;
  // Loop-exit flag polled by Run().
  // NOTE(review): ensure the constructor (defined below) initializes this.
  bool finished_;
  std::map<const void*, AllocationRecord> allocated_pts_;
  std::string unique_name_;
  int do_delete_counter_;
  int do_malloc_counter_;
};
+
+class AddAllocationStressThread : public nb::TestThread {
+ public:
+  typedef std::map<const void*, AllocationRecord> AllocMap;
+
+  AddAllocationStressThread(MemoryTracker* memory_tracker,
+                            int num_elements_add,
+                            AllocMap* destination_map,
+                            starboard::Mutex* destination_map_mutex)
+      : memory_tracker_(memory_tracker),
+        num_elements_to_add_(num_elements_add),
+        destination_map_(destination_map),
+        destination_map_mutex_(destination_map_mutex) {}
+
+  virtual void Run() {
+    for (int i = 0; i < num_elements_to_add_; ++i) {
+      const int alloc_size = std::rand() % 100 + 8;
+      void* ptr = SbMemoryAllocate(alloc_size);
+
+      AllocationRecord record;
+      if (memory_tracker_->GetMemoryTracking(ptr, &record)) {
+        NoMemTracking no_mem_tracking;  // simplifies test.
+
+        starboard::ScopedLock lock(*destination_map_mutex_);
+        destination_map_->insert(std::make_pair(ptr, record));
+      } else {
+        ADD_FAILURE_AT(__FILE__, __LINE__) << "Could not add pointer.";
+      }
+      if (FlipCoin()) {
+        SbThreadYield();  // Give other threads a chance to run.
+      }
+    }
+  }
+
+ private:
+  MemoryTracker* memory_tracker_;
+  AllocMap* destination_map_;
+  starboard::Mutex* destination_map_mutex_;
+  int num_elements_to_add_;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// Framework which initializes the MemoryTracker once and installs it
+// for the first test and the removes the MemoryTracker after the
+// the last test finishes.
+class MemoryTrackerImplTest : public ::testing::Test {
+ public:
+  typedef MemoryTrackerImpl::AllocationMapType AllocationMapType;
+  MemoryTrackerImplTest() {}
+
+  // Suite-wide tracker instance, created once in SetUpTestCase().
+  MemoryTrackerImpl* memory_tracker() { return s_memory_tracker_; }
+
+  bool GetAllocRecord(void* alloc_memory, AllocationRecord* output) {
+    return memory_tracker()->GetMemoryTracking(alloc_memory, output);
+  }
+
+  AllocationMapType* pointer_map() { return memory_tracker()->pointer_map(); }
+
+  // Number of live allocations currently recorded by the tracker.
+  size_t NumberOfAllocations() {
+    AllocationMapType* map = pointer_map();
+    return map->Size();
+  }
+
+  int64_t TotalAllocationBytes() {
+    return memory_tracker()->GetTotalAllocationBytes();
+  }
+
+  // False when the build does not support tracking; tests early-out then.
+  bool MemoryTrackerEnabled() const { return s_memory_tracker_enabled_; }
+
+ protected:
+  // Creates the tracker and installs the global allocation hooks exactly
+  // once for the whole suite. The tracker is intentionally never deleted:
+  // the hooks remain installed for the life of the process.
+  static void SetUpTestCase() {
+    if (!s_memory_tracker_) {
+      s_memory_tracker_ = new MemoryTrackerImpl;
+      s_memory_tracker_enabled_ =
+          s_memory_tracker_->InstallGlobalTrackingHooks();
+    }
+  }
+  static void TearDownTestCase() { SbMemorySetReporter(NULL); }
+
+  // Each test starts with an empty tracker filtered to this thread only, so
+  // allocations made by background threads do not pollute the counts.
+  virtual void SetUp() {
+    memory_tracker()->Clear();
+    memory_tracker()->SetThreadFilter(SbThreadGetId());
+  }
+
+  virtual void TearDown() { memory_tracker()->Clear(); }
+  static bool s_memory_tracker_enabled_;
+};
+bool MemoryTrackerImplTest::s_memory_tracker_enabled_ = false;
+
+///////////////////////////////////////////////////////////////////////////////
+// MemoryTrackerImplTest
+TEST_F(MemoryTrackerImplTest, NoMemTracking) {
+  // Memory tracker is not enabled for this build.
+  if (!MemoryTrackerEnabled()) {
+    return;
+  }
+  // Allocations made while a NoMemTracking guard is alive must not change
+  // the tracker's counts; allocations outside that scope must.
+  ASSERT_EQ_NO_TRACKING(0, NumberOfAllocations());
+  scoped_ptr<int> dummy(new int());
+  EXPECT_EQ_NO_TRACKING(1, NumberOfAllocations());
+  {
+    // Now that memory allocation is disabled, no more allocations should
+    // be recorded.
+    NoMemTracking no_memory_tracking_in_this_scope;
+    int* dummy2 = new int();
+    EXPECT_EQ_NO_TRACKING(1, NumberOfAllocations());
+    delete dummy2;
+    EXPECT_EQ_NO_TRACKING(1, NumberOfAllocations());
+  }
+  scoped_ptr<int> dummy2(new int());
+  EXPECT_EQ_NO_TRACKING(2, NumberOfAllocations());
+  dummy.reset(NULL);
+  EXPECT_EQ_NO_TRACKING(1, NumberOfAllocations());
+  dummy2.reset(NULL);
+  EXPECT_EQ_NO_TRACKING(0, NumberOfAllocations());
+}
+
+TEST_F(MemoryTrackerImplTest, RemovePointerOnNoMemoryTracking) {
+  // Memory tracker is not enabled for this build.
+  if (!MemoryTrackerEnabled()) {
+    return;
+  }
+
+  int* int_ptr = new int();
+  {
+    NoMemTracking no_memory_tracking_in_this_scope;
+    delete int_ptr;  // Freed while tracking of new allocations is disabled.
+  }
+  // A tracked pointer must still be removed from the map when it is freed,
+  // even if the free happens under NoMemTracking — otherwise stale entries
+  // would accumulate.
+  EXPECT_FALSE_NO_TRACKING(pointer_map()->Get(int_ptr, NULL));
+}
+
+TEST_F(MemoryTrackerImplTest, NewDeleteOverridenTest) {
+  // Memory tracker is not enabled for this build.
+  if (!MemoryTrackerEnabled()) {
+    return;
+  }
+  // Global operator new/delete must be routed through the tracker's hooks:
+  // one new raises the count by one, the matching delete lowers it.
+  EXPECT_EQ_NO_TRACKING(0, NumberOfAllocations());
+  int* int_a = new int(0);
+  EXPECT_EQ_NO_TRACKING(1, NumberOfAllocations());
+  delete int_a;
+  EXPECT_EQ_NO_TRACKING(0, NumberOfAllocations());
+}
+
+TEST_F(MemoryTrackerImplTest, TotalAllocationBytes) {
+  // Memory tracker is not enabled for this build.
+  if (!MemoryTrackerEnabled()) {
+    return;
+  }
+  int32_t* int_a = new int32_t(0);
+  EXPECT_EQ_NO_TRACKING(1, NumberOfAllocations());
+  // int32_t is exactly 4 bytes, so the byte counter must report 4.
+  EXPECT_EQ_NO_TRACKING(4, TotalAllocationBytes());
+  delete int_a;
+  EXPECT_EQ_NO_TRACKING(0, NumberOfAllocations());
+}
+
+// Tests the expectation that a lot of allocations can be executed and that
+// internal data structures won't overflow.
+TEST_F(MemoryTrackerImplTest, NoStackOverflow) {
+  // Memory tracker is not enabled for this build.
+  if (!MemoryTrackerEnabled()) {
+    return;
+  }
+  static const int kNumAllocations = 1000;
+  std::vector<int*> allocations;
+
+  // Also it turns out that this test is great for catching
+  // background threads pushing allocations through the allocator.
+  // This is supposed to be filtered, but if it's not then this test will
+  // fail.
+  // SbThreadYield() is used to give other threads a chance to enter into our
+  // allocator and catch a test failure.
+  SbThreadSleep(1);
+  ASSERT_EQ_NO_TRACKING(0, NumberOfAllocations());
+
+  for (int i = 0; i < kNumAllocations; ++i) {
+    SbThreadYield();
+    EXPECT_EQ_NO_TRACKING(i, NumberOfAllocations());
+    int* val = new int(0);
+    NoMemTracking no_tracking_in_scope;  // Vector growth must not be counted.
+    allocations.push_back(val);
+  }
+
+  EXPECT_EQ_NO_TRACKING(kNumAllocations, NumberOfAllocations());
+
+  // Tear everything down again, verifying the count drops one per delete.
+  for (int i = 0; i < kNumAllocations; ++i) {
+    SbThreadYield();
+    EXPECT_EQ_NO_TRACKING(kNumAllocations - i, NumberOfAllocations());
+    delete allocations[i];
+  }
+
+  EXPECT_EQ_NO_TRACKING(0, NumberOfAllocations());
+}
+
+// Tests the expectation that the macros will push/pop the memory scope.
+TEST_F(MemoryTrackerImplTest, MacrosPushPop) {
+  // Memory tracker is not enabled for this build.
+  if (!MemoryTrackerEnabled()) {
+    return;
+  }
+  scoped_ptr<int> dummy;
+  {
+    TRACK_MEMORY_SCOPE("TestAllocations");
+    dummy.reset(new int());  // Allocated inside the named scope.
+  }
+
+  scoped_ptr<int> dummy2(new int());  // Allocated outside any named scope.
+
+  AllocationRecord alloc_rec;
+  pointer_map()->Get(dummy.get(), &alloc_rec);
+
+  // The in-scope allocation is attributed to the "TestAllocations" group.
+  ASSERT_TRUE_NO_TRACKING(alloc_rec.allocation_group);
+  EXPECT_EQ_NO_TRACKING(std::string("TestAllocations"),
+                        alloc_rec.allocation_group->name());
+
+  pointer_map()->Get(dummy2.get(), &alloc_rec);
+
+  // An allocation made after the scope popped falls into "Unaccounted".
+  ASSERT_TRUE_NO_TRACKING(alloc_rec.allocation_group);
+  EXPECT_EQ_NO_TRACKING(std::string("Unaccounted"),
+                        alloc_rec.allocation_group->name());
+}
+
+// Tests the expectation that if the cached flag on the NbMemoryScopeInfo is
+// set to false, then the caching of the handle is not performed.
+TEST_F(MemoryTrackerImplTest, RespectsNonCachedHandle) {
+  // Memory tracker is not enabled for this build.
+  if (!MemoryTrackerEnabled()) {
+    return;
+  }
+  // NOTE(review): kCaching is unused; the literal false in the initializer
+  // below is what actually disables caching.
+  const bool kCaching = false;
+  // Fields: cached handle, name, file, line, function, allows-caching flag.
+  NbMemoryScopeInfo memory_scope = {
+      0,        "MyName",     __FILE__,
+      __LINE__, __FUNCTION__, false};  // false to disallow caching.
+
+  // Pushing the memory scope should trigger the caching operation to be
+  // attempted. However, because caching was explicitly disabled this handle
+  // should retain the value of 0.
+  NbPushMemoryScope(&memory_scope);
+  EXPECT_EQ_NO_TRACKING(memory_scope.cached_handle_, uintptr_t(0));
+
+  // ... and still assert that the group was created with the expected name.
+  AllocationGroup* group = memory_tracker()->GetAllocationGroup("MyName");
+  // Equality check.
+  EXPECT_EQ_NO_TRACKING(0, group->name().compare("MyName"));
+  NbPopMemoryScope();
+}
+
+// Tests the expectation that if the cached flag on the NbMemoryScopeInfo is
+// set to true, then the caching will be applied for the cached_handle of the
+// memory scope.
+TEST_F(MemoryTrackerImplTest, PushAllocGroupCachedHandle) {
+  // Memory tracker is not enabled for this build.
+  if (!MemoryTrackerEnabled()) {
+    return;
+  }
+  NbMemoryScopeInfo memory_scope = {
+      0,         // Cached handle.
+      "MyName",  // Memory scope name.
+      __FILE__, __LINE__, __FUNCTION__,
+      true  // Allows caching.
+  };
+
+  NbPushMemoryScope(&memory_scope);
+  // With caching allowed, the push writes the group's address into the
+  // cached handle so later pushes can skip the name lookup.
+  EXPECT_TRUE_NO_TRACKING(memory_scope.cached_handle_ != uintptr_t(0));
+  AllocationGroup* group = memory_tracker()->GetAllocationGroup("MyName");
+
+  EXPECT_TRUE_NO_TRACKING(memory_scope.cached_handle_ ==
+                          reinterpret_cast<uintptr_t>(group));
+  // Balance the push (as RespectsNonCachedHandle does) so the thread's
+  // scope stack is not left dirty for subsequent tests.
+  NbPopMemoryScope();
+}
+
+// Tests the expectation that the macro TRACK_MEMORY_SCOPE will capture the
+// allocation in the MemoryTrackerImpl.
+TEST_F(MemoryTrackerImplTest, MacrosGroupAccounting) {
+  // Memory tracker is not enabled for this build.
+  if (!MemoryTrackerEnabled()) {
+    return;
+  }
+  MemoryTrackerImpl* track_alloc = memory_tracker();  // Debugging.
+  track_alloc->Clear();
+
+  // NOTE(review): this second Clear() is redundant — track_alloc aliases
+  // memory_tracker().
+  memory_tracker()->Clear();
+  const AllocationGroup* group_a =
+      memory_tracker()->GetAllocationGroup("MemoryTrackerTest-ScopeA");
+
+  const AllocationGroup* group_b =
+      memory_tracker()->GetAllocationGroup("MemoryTrackerTest-ScopeB");
+
+  ASSERT_TRUE_NO_TRACKING(group_a);
+  ASSERT_TRUE_NO_TRACKING(group_b);
+
+  int32_t num_allocations = -1;
+  int64_t allocation_bytes = -1;
+
+  // Expect that both groups have no allocations in them.
+  group_a->GetAggregateStats(&num_allocations, &allocation_bytes);
+  EXPECT_EQ_NO_TRACKING(0, num_allocations);
+  EXPECT_EQ_NO_TRACKING(0, allocation_bytes);
+
+  group_b->GetAggregateStats(&num_allocations, &allocation_bytes);
+  EXPECT_EQ_NO_TRACKING(0, num_allocations);
+  EXPECT_EQ_NO_TRACKING(0, allocation_bytes);
+
+  scoped_ptr<int> alloc_a, alloc_b, alloc_b2;
+  {
+    TRACK_MEMORY_SCOPE("MemoryTrackerTest-ScopeA");
+    alloc_a.reset(new int());  // Attributed to ScopeA.
+    {
+      TRACK_MEMORY_SCOPE("MemoryTrackerTest-ScopeB");
+      alloc_b.reset(new int());  // Attributed to the innermost scope, ScopeB.
+      alloc_b2.reset(new int());
+
+      // ScopeA still holds exactly alloc_a: one allocation of 4 bytes.
+      group_a->GetAggregateStats(&num_allocations, &allocation_bytes);
+      EXPECT_EQ_NO_TRACKING(1, num_allocations);
+      EXPECT_EQ_NO_TRACKING(4, allocation_bytes);
+      alloc_a.reset(NULL);
+      group_a->GetAggregateStats(&num_allocations, &allocation_bytes);
+      EXPECT_EQ_NO_TRACKING(0, num_allocations);
+      EXPECT_EQ_NO_TRACKING(0, allocation_bytes);
+
+      num_allocations = allocation_bytes = -1;
+      group_b->GetAggregateStats(&num_allocations, &allocation_bytes);
+      EXPECT_EQ_NO_TRACKING(2, num_allocations);
+      EXPECT_EQ_NO_TRACKING(8, allocation_bytes);
+
+      // Stats drop step by step as each ScopeB allocation is returned.
+      alloc_b2.reset(NULL);
+      group_b->GetAggregateStats(&num_allocations, &allocation_bytes);
+      EXPECT_EQ_NO_TRACKING(1, num_allocations);
+      EXPECT_EQ_NO_TRACKING(4, allocation_bytes);
+
+      alloc_b.reset(NULL);
+      group_b->GetAggregateStats(&num_allocations, &allocation_bytes);
+      EXPECT_EQ_NO_TRACKING(0, num_allocations);
+      EXPECT_EQ_NO_TRACKING(0, allocation_bytes);
+    }
+  }
+}
+
+// Tests the expectation that the visitor can access the allocations.
+TEST_F(MemoryTrackerImplTest, VisitorAccess) {
+  // Memory tracker is not enabled for this build.
+  if (!MemoryTrackerEnabled()) {
+    return;
+  }
+  // Counts every allocation visited; never stops traversal early.
+  class SimpleVisitor : public AllocationVisitor {
+   public:
+    SimpleVisitor() : num_memory_allocs_(0) {}
+    virtual bool Visit(const void* memory,
+                       const AllocationRecord& alloc_record) {
+      num_memory_allocs_++;
+      return true;  // Keep traversing.
+    }
+
+    size_t num_memory_allocs_;
+  };
+
+  SimpleVisitor visitor;
+  scoped_ptr<int> int_ptr(new int);
+
+  // Should see the int_ptr allocation.
+  memory_tracker()->Accept(&visitor);
+  EXPECT_EQ_NO_TRACKING(1, visitor.num_memory_allocs_);
+  visitor.num_memory_allocs_ = 0;
+
+  int_ptr.reset(NULL);
+  // Now no allocations should be available.
+  memory_tracker()->Accept(&visitor);
+  EXPECT_EQ_NO_TRACKING(0, visitor.num_memory_allocs_);
+}
+
+// A stress test that rapidly adds allocations, but saves all deletions
+// for the main thread. This test will catch concurrency errors related
+// to reporting new allocations.
+TEST_F(MemoryTrackerImplTest, MultiThreadedStressAddTest) {
+  // Memory tracker is not enabled for this build.
+  if (!MemoryTrackerEnabled()) {
+    return;
+  }
+  // Disable allocation filtering.
+  memory_tracker()->SetThreadFilter(kSbThreadInvalidId);
+
+  std::vector<nb::TestThread*> threads;
+
+  // Split ~10000 allocations evenly across the worker threads.
+  const int kNumObjectsToAdd = 10000 / NUM_STRESS_TEST_THREADS;
+  AddAllocationStressThread::AllocMap map;
+  starboard::Mutex map_mutex;
+
+  for (int i = 0; i < NUM_STRESS_TEST_THREADS; ++i) {
+    nb::TestThread* thread = new AddAllocationStressThread(
+        memory_tracker(), kNumObjectsToAdd, &map, &map_mutex);
+
+    threads.push_back(thread);
+  }
+
+  for (int i = 0; i < NUM_STRESS_TEST_THREADS; ++i) {
+    threads[i]->Start();
+  }
+
+  for (int i = 0; i < NUM_STRESS_TEST_THREADS; ++i) {
+    threads[i]->Join();
+  }
+  for (int i = 0; i < NUM_STRESS_TEST_THREADS; ++i) {
+    delete threads[i];
+  }
+
+  // All deletions happen here on the main thread: each recorded pointer
+  // must be tracked right up until it is freed, and untracked afterwards.
+  while (!map.empty()) {
+    const void* ptr = map.begin()->first;
+    map.erase(map.begin());
+
+    if (!memory_tracker()->GetMemoryTracking(ptr, NULL)) {
+      ADD_FAILURE_AT(__FILE__, __LINE__) << "No tracking?!";
+    }
+
+    SbMemoryDeallocate(const_cast<void*>(ptr));
+    if (memory_tracker()->GetMemoryTracking(ptr, NULL)) {
+      ADD_FAILURE_AT(__FILE__, __LINE__) << "Tracking?!";
+    }
+  }
+}
+
+// Tests the expectation that memory scopes are multi-threaded safe.
+TEST_F(MemoryTrackerImplTest, MultiThreadedMemoryScope) {
+  // Memory tracker is not enabled for this build.
+  if (!MemoryTrackerEnabled()) {
+    return;
+  }
+  memory_tracker()->SetThreadFilter(kSbThreadInvalidId);
+  TRACK_MEMORY_SCOPE("MultiThreadedStressUseTest");
+
+  std::vector<MemoryScopeThread*> threads;
+
+  for (int i = 0; i < NUM_STRESS_TEST_THREADS; ++i) {
+    threads.push_back(new MemoryScopeThread(memory_tracker()));
+  }
+
+  for (int i = 0; i < threads.size(); ++i) {
+    threads[i]->Start();
+  }
+
+  // Let the workers run concurrently for the configured duration
+  // (SbThreadSleep takes microseconds).
+  SbThreadSleep(STRESS_TEST_DURATION_SECONDS * 1000 * 1000);
+
+  for (int i = 0; i < threads.size(); ++i) {
+    threads[i]->Join();
+  }
+
+  for (int i = 0; i < threads.size(); ++i) {
+    delete threads[i];
+  }
+
+  threads.clear();
+}
+
+// Tests the expectation that new/delete can be done by different threads.
+TEST_F(MemoryTrackerImplTest, MultiThreadedStressUseTest) {
+  // Memory tracker is not enabled for this build.
+  if (!MemoryTrackerEnabled()) {
+    return;
+  }
+  // Disable allocation filtering.
+  memory_tracker()->SetThreadFilter(kSbThreadInvalidId);
+  TRACK_MEMORY_SCOPE("MultiThreadedStressUseTest");
+
+  std::vector<AllocationStressThread*> threads;
+
+  for (int i = 0; i < NUM_STRESS_TEST_THREADS; ++i) {
+    threads.push_back(new AllocationStressThread(memory_tracker()));
+  }
+
+  for (int i = 0; i < threads.size(); ++i) {
+    threads[i]->Start();
+  }
+
+  // Let the workers allocate/free concurrently for the configured duration
+  // (SbThreadSleep takes microseconds).
+  SbThreadSleep(STRESS_TEST_DURATION_SECONDS * 1000 * 1000);
+
+  for (int i = 0; i < threads.size(); ++i) {
+    threads[i]->Join();
+  }
+
+  for (int i = 0; i < threads.size(); ++i) {
+    delete threads[i];
+  }
+
+  threads.clear();
+}
+
+//////////////////////////// Implementation ///////////////////////////////////
+/// Impl of AllocationStressThread
+// Each stress thread gets a unique allocation-group name such as
+// "AllocStressThread-0".
+// NOTE(review): |counter| is unsynchronized; this is safe only if all
+// constructors run on a single thread — verify against the callers.
+AllocationStressThread::AllocationStressThread(MemoryTrackerImpl* tracker)
+    : memory_tracker_(tracker), finished_(false) {
+  static int counter = 0;
+  std::stringstream ss;
+  ss << "AllocStressThread-" << counter++;
+  unique_name_ = ss.str();
+}
+
+AllocationStressThread::~AllocationStressThread() {
+  // Run() drains all of its allocations before exiting, so any survivors
+  // here indicate a bookkeeping failure in the test itself.
+  if (!allocated_pts_.empty()) {
+    ADD_FAILURE_AT(__FILE__, __LINE__) << "allocated pointers still exist";
+  }
+}
+
+// Signals Run() to exit its loop, then blocks until the thread finishes.
+// NOTE(review): |finished_| is a plain bool written here and polled in
+// Run(); confirm cross-thread visibility is guaranteed on all targets.
+void AllocationStressThread::Join() {
+  finished_ = true;
+  nb::TestThread::Join();
+}
+
+// Verifies that every pointer this thread still owns is known to the
+// tracker; any miss is reported as a test failure.
+void AllocationStressThread::CheckPointers() {
+  typedef AllocMap::iterator Iter;
+
+  for (Iter it = allocated_pts_.begin(); it != allocated_pts_.end(); ++it) {
+    const void* ptr = it->first;
+    const bool found = memory_tracker_->GetMemoryTracking(ptr, NULL);
+    if (!found) {
+      NoMemTracking no_tracking_in_scope;
+      ADD_FAILURE_AT(__FILE__, __LINE__) << "Not found";
+    }
+  }
+}
+
+// Alternates randomly between allocating and deleting until Join() is
+// called, verifying tracker consistency after every operation, then frees
+// everything that remains.
+void AllocationStressThread::Run() {
+  while (!finished_) {
+    // Use the stored coin flip to choose the operation. Previously the
+    // result was computed but ignored and a second flip decided the branch.
+    const bool do_delete = FlipCoin();
+    if (do_delete) {
+      DoDelete();
+    } else {
+      DoMalloc();
+    }
+    CheckPointers();
+
+    // Randomly give other threads the opportunity to run.
+    if (FlipCoin()) {
+      SbThreadYield();
+    }
+  }
+
+  // Clear out all memory.
+  while (DoDelete()) {
+    ;
+  }
+}
+
+// Picks one tracked allocation uniformly at random, copies it into
+// |output|, and drops it from the bookkeeping map. Returns false when the
+// map is empty.
+bool AllocationStressThread::RemoveRandomAllocation(
+    std::pair<const void*, AllocationRecord>* output) {
+  if (allocated_pts_.empty()) {
+    return false;
+  }
+
+  // Walk a begin iterator forward by a random offset to select the victim.
+  AllocMap::iterator victim = allocated_pts_.begin();
+  for (int steps = std::rand() % allocated_pts_.size(); steps > 0; --steps) {
+    ++victim;
+  }
+  output->first = victim->first;
+  output->second = victim->second;
+  allocated_pts_.erase(victim);
+  return true;
+}
+
+// Deletes one randomly chosen prior allocation, first verifying the
+// tracker's record (existence and owning group). Returns false when there
+// was nothing left to delete.
+bool AllocationStressThread::DoDelete() {
+  NoMemTracking no_memory_tracking_in_this_scope;
+  ++do_delete_counter_;
+
+  std::pair<const void*, AllocationRecord> alloc;
+  if (!RemoveRandomAllocation(&alloc)) {
+    return false;
+  }
+
+  const void* ptr = alloc.first;
+  const AllocationRecord expected_alloc_record = alloc.second;
+
+  TRACK_MEMORY_SCOPE_DYNAMIC(unique_name_.c_str());
+  AllocationGroup* current_group = memory_tracker_->PeekAllocationGroup();
+
+  // The current allocation group's name should match this thread's unique
+  // name, since the scope was just pushed above.
+  if (current_group->name() != unique_name_) {
+    NoMemTracking no_memory_tracking_in_this_scope;
+    ADD_FAILURE_AT(__FILE__, __LINE__) << " " << current_group->name()
+                                       << " != " << unique_name_;
+  }
+
+  MemoryTrackerImpl::AllocationMapType* internal_alloc_map =
+      memory_tracker_->pointer_map();
+
+  AllocationRecord existing_alloc_record;
+
+  const bool found_existing_record =
+      memory_tracker_->GetMemoryTracking(ptr, &existing_alloc_record);
+
+  // The pointer must still be tracked, and its group must be the one this
+  // thread allocated it under (DoMalloc pushes the same dynamic scope).
+  if (!found_existing_record) {
+    ADD_FAILURE_AT(__FILE__, __LINE__)
+        << "expected to find existing record, but did not";
+  } else if (current_group != existing_alloc_record.allocation_group) {
+    ADD_FAILURE_AT(__FILE__, __LINE__)
+        << "group allocation mismatch: " << current_group->name()
+        << " != " << existing_alloc_record.allocation_group->name() << "\n";
+  }
+  SbMemoryDeallocate(const_cast<void*>(ptr));
+  return true;
+}
+
+// Makes one random-sized allocation under this thread's dynamic scope,
+// checks that the tracker recorded it, and stores it in the local map.
+// Caps outstanding allocations at 10000 to bound memory usage.
+void AllocationStressThread::DoMalloc() {
+  ++do_malloc_counter_;
+  if (allocated_pts_.size() > 10000) {
+    return;
+  }
+
+  TRACK_MEMORY_SCOPE_DYNAMIC(unique_name_.c_str());
+  AllocationGroup* current_group = memory_tracker_->PeekAllocationGroup();
+
+  // Sanity check, make sure that the current_group name is the same as
+  // our unique name.
+  if (current_group->name() != unique_name_) {
+    NoMemTracking no_tracking_in_scope;
+    ADD_FAILURE_AT(__FILE__, __LINE__) << " " << current_group->name()
+                                       << " != " << unique_name_;
+  }
+
+  if (!memory_tracker_->IsMemoryTrackingEnabled()) {
+    NoMemTracking no_tracking_in_scope;
+    ADD_FAILURE_AT(__FILE__, __LINE__)
+        << " memory tracking state was disabled.";
+  }
+
+  const int alloc_size = std::rand() % 100 + 8;  // 8-107 bytes.
+
+  void* memory = SbMemoryAllocate(alloc_size);
+
+  // The hooked allocator must have recorded the new pointer.
+  AllocationRecord record;
+  bool found = memory_tracker_->GetMemoryTracking(memory, &record);
+  if (!found) {
+    NoMemTracking no_tracking_in_scope;
+    ADD_FAILURE_AT(__FILE__, __LINE__)
+        << "Violated expectation, malloc counter: " << do_malloc_counter_;
+  }
+  AllocMap::iterator found_it = allocated_pts_.find(memory);
+
+  if (found_it != allocated_pts_.end()) {
+    NoMemTracking no_tracking_in_scope;
+    ADD_FAILURE_AT(__FILE__, __LINE__)
+        << "This pointer should not be in the map.";
+  }
+
+  NoMemTracking no_tracking_in_scope;  // Map growth itself must not be tracked.
+  allocated_pts_[memory] = AllocationRecord(alloc_size, current_group);
+}
+
+}  // namespace
+}  // namespace analytics
+}  // namespace nb
diff --git a/src/nb/analytics/memory_tracker_test.cc b/src/nb/analytics/memory_tracker_test.cc
new file mode 100644
index 0000000..92cb8b3
--- /dev/null
+++ b/src/nb/analytics/memory_tracker_test.cc
@@ -0,0 +1,222 @@
+/*
+ * Copyright 2016 Google Inc. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "nb/analytics/memory_tracker.h"
+#include "nb/analytics/memory_tracker_helpers.h"
+#include "nb/memory_scope.h"
+#include "nb/scoped_ptr.h"
+#include "starboard/memory.h"
+#include "starboard/memory_reporter.h"
+#include "starboard/thread.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace nb {
+namespace analytics {
+namespace {
+
+// Tracker under test; set in MemoryTrackerTest::SetUpTestCase().
+MemoryTracker* s_memory_tracker_ = NULL;
+
+// RAII guard that disables allocation tracking for its lifetime and
+// restores the previous enabled state on destruction. A no-op when the
+// tracker has not been installed yet.
+struct NoMemTracking {
+  bool prev_val;
+  NoMemTracking() : prev_val(false) {
+    if (s_memory_tracker_) {
+      prev_val = s_memory_tracker_->IsMemoryTrackingEnabled();
+      s_memory_tracker_->SetMemoryTrackingEnabled(false);
+    }
+  }
+  ~NoMemTracking() {
+    if (s_memory_tracker_) {
+      s_memory_tracker_->SetMemoryTrackingEnabled(prev_val);
+    }
+  }
+};
+
+// EXPECT_XXX and ASSERT_XXX allocate memory, a big no-no when
+// unit testing allocations. These overrides disable memory
+// tracking for the duration of the EXPECT and ASSERT operations.
+#define EXPECT_EQ_NO_TRACKING(A, B)                 \
+  {                                                 \
+    NoMemTracking no_memory_tracking_in_this_scope; \
+    EXPECT_EQ(A, B);                                \
+  }
+
+#define EXPECT_TRUE_NO_TRACKING(A)                  \
+  {                                                 \
+    NoMemTracking no_memory_tracking_in_this_scope; \
+    EXPECT_TRUE(A);                                 \
+  }
+
+#define EXPECT_FALSE_NO_TRACKING(A)                 \
+  {                                                 \
+    NoMemTracking no_memory_tracking_in_this_scope; \
+    EXPECT_FALSE(A);                                \
+  }
+
+#define ASSERT_TRUE_NO_TRACKING(A)                  \
+  {                                                 \
+    NoMemTracking no_memory_tracking_in_this_scope; \
+    ASSERT_TRUE(A);                                 \
+  }
+
+///////////////////////////////////////////////////////////////////////////////
+// Framework which initializes the MemoryTracker once and installs it
+// for the first test, and removes the MemoryTracker after the
+// last test finishes.
+class MemoryTrackerTest : public ::testing::Test {
+ public:
+  MemoryTrackerTest() {}
+
+  // Tracker singleton shared by all tests in this suite.
+  MemoryTracker* memory_tracker() { return s_memory_tracker_; }
+
+  bool GetAllocRecord(void* alloc_memory, AllocationRecord* output) {
+    return memory_tracker()->GetMemoryTracking(alloc_memory, output);
+  }
+
+  int64_t TotalNumberOfAllocations() {
+    return memory_tracker()->GetTotalNumberOfAllocations();
+  }
+
+  int64_t TotalAllocationBytes() {
+    return memory_tracker()->GetTotalAllocationBytes();
+  }
+
+  // False when the build does not support tracking; tests early-out then.
+  bool MemoryTrackerEnabled() const { return s_memory_tracker_enabled_; }
+
+ protected:
+  // Installs the global tracking hooks before the first test runs...
+  static void SetUpTestCase() {
+    s_memory_tracker_ = MemoryTracker::Get();
+    s_memory_tracker_enabled_ = s_memory_tracker_->InstallGlobalTrackingHooks();
+  }
+  // ...and removes them after the last test finishes.
+  static void TearDownTestCase() {
+    s_memory_tracker_->RemoveGlobalTrackingHooks();
+  }
+  static bool s_memory_tracker_enabled_;
+};
+bool MemoryTrackerTest::s_memory_tracker_enabled_ = false;
+
+///////////////////////////////////////////////////////////////////////////////
+// Visitor that scans the tracker's allocations for one specific pointer.
+class FindAllocationVisitor : public AllocationVisitor {
+ public:
+  FindAllocationVisitor() : found_(false), memory_to_find_(NULL) {}
+
+  bool found() const { return found_; }
+  void set_found(bool val) { found_ = val; }
+  // Arms the visitor to search for |memory| and resets the found flag.
+  void set_memory_to_find(const void* memory) {
+    memory_to_find_ = memory;
+    found_ = false;
+  }
+
+  // Stops the traversal (by returning false) as soon as the target pointer
+  // is encountered.
+  virtual bool Visit(const void* memory, const AllocationRecord& alloc_record) {
+    if (memory != memory_to_find_) {
+      return true;  // Not the one we want; keep going.
+    }
+    found_ = true;
+    return false;
+  }
+
+ private:
+  bool found_;
+  const void* memory_to_find_;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+TEST_F(MemoryTrackerTest, MacrosScopedObject) {
+  // Memory tracker is not enabled for this build.
+  if (!MemoryTrackerEnabled()) {
+    return;
+  }
+
+  scoped_ptr<int> alloc_a, alloc_b;
+  {
+    TRACK_MEMORY_SCOPE("MemoryTrackerTest-ScopeA");
+    alloc_a.reset(new int());  // Attributed to ScopeA.
+    {
+      TRACK_MEMORY_SCOPE("MemoryTrackerTest-ScopeB");
+      alloc_b.reset(new int());  // Attributed to the nested ScopeB.
+    }
+  }
+
+  // Now test that the allocations now exist in the memory tracker.
+  AllocationRecord alloc_record_a, alloc_record_b;
+  // Expect that the allocations exist and that the AllocRecords are written
+  // with the allocation information.
+  EXPECT_TRUE_NO_TRACKING(
+      memory_tracker()->GetMemoryTracking(alloc_a.get(), &alloc_record_a));
+  EXPECT_TRUE_NO_TRACKING(
+      memory_tracker()->GetMemoryTracking(alloc_b.get(), &alloc_record_b));
+
+  // Sanity test that the allocations are non-null.
+
+  const AllocationGroup* group_a = alloc_record_a.allocation_group;
+  const AllocationGroup* group_b = alloc_record_b.allocation_group;
+  ASSERT_TRUE_NO_TRACKING(group_a);
+  ASSERT_TRUE_NO_TRACKING(group_b);
+
+  EXPECT_EQ_NO_TRACKING(group_a->name(),
+                        std::string("MemoryTrackerTest-ScopeA"));
+  EXPECT_EQ_NO_TRACKING(group_b->name(),
+                        std::string("MemoryTrackerTest-ScopeB"));
+
+  // When the allocation is returned to the free store then it's expected that
+  // the memory tracker will indicate that the allocation no longer exists.
+  alloc_a.reset();
+  alloc_b.reset();
+
+  // After reset() the scoped_ptrs hold NULL, so these lookups must fail.
+  EXPECT_FALSE_NO_TRACKING(
+      memory_tracker()->GetMemoryTracking(alloc_a.get(), &alloc_record_a));
+  EXPECT_FALSE_NO_TRACKING(
+      memory_tracker()->GetMemoryTracking(alloc_b.get(), &alloc_record_b));
+
+  int32_t num_allocations = -1;
+  int64_t allocation_bytes = -1;
+
+  // Both groups outlive their allocations and report empty aggregate stats.
+  group_a->GetAggregateStats(&num_allocations, &allocation_bytes);
+  EXPECT_EQ_NO_TRACKING(0, num_allocations);
+  EXPECT_EQ_NO_TRACKING(0, allocation_bytes);
+
+  group_b->GetAggregateStats(&num_allocations, &allocation_bytes);
+  EXPECT_EQ_NO_TRACKING(0, num_allocations);
+  EXPECT_EQ_NO_TRACKING(0, allocation_bytes);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+TEST_F(MemoryTrackerTest, Visitor) {
+  // Memory tracker is not enabled for this build.
+  if (!MemoryTrackerEnabled()) {
+    return;
+  }
+
+  FindAllocationVisitor visitor;
+
+  scoped_ptr<int> alloc_a;
+  {
+    TRACK_MEMORY_SCOPE("MemoryTrackerTest-ScopeA");
+
+    // A live allocation must be discoverable through Accept().
+    alloc_a.reset(new int());
+    visitor.set_memory_to_find(alloc_a.get());
+    memory_tracker()->Accept(&visitor);
+    EXPECT_TRUE_NO_TRACKING(visitor.found());
+
+    // Once freed, the same pointer must no longer be visited.
+    alloc_a.reset(NULL);
+    visitor.set_found(false);
+    memory_tracker()->Accept(&visitor);
+    EXPECT_FALSE_NO_TRACKING(visitor.found());
+  }
+}
+
+}  // namespace
+}  // namespace analytics
+}  // namespace nb
diff --git a/src/nb/hash.cc b/src/nb/hash.cc
new file mode 100644
index 0000000..b04c7d5
--- /dev/null
+++ b/src/nb/hash.cc
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2016 Google Inc. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "nb/hash.h"
+
+#include "starboard/types.h"
+
+namespace nb {
+
+namespace {
+// Injects the super fast hash into the nb namespace.
+#include "third_party/super_fast_hash/super_fast_hash.cc"
+}  // namespace
+
+// Thin wrapper over SuperFastHash (included above into an anonymous
+// namespace) so callers depend only on nb/hash.h.
+uint32_t RuntimeHash32(const char* data, int length, uint32_t prev_hash) {
+  return SuperFastHash(data, length, prev_hash);
+}
+
+}  // namespace nb
diff --git a/src/nb/hash.h b/src/nb/hash.h
new file mode 100644
index 0000000..fe48e3d
--- /dev/null
+++ b/src/nb/hash.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2016 Google Inc. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef NB_HASH_H_
+#define NB_HASH_H_
+
+#include "starboard/types.h"
+
+namespace nb {
+
+enum { kDefaultSeed = 0x12345789 };
+
+// RuntimeHash32 is a 32 bit hash for data. The only guarantee is that this
+// hash is persistent for the lifetime of the program. This hash function
+// is not guaranteed to be consistent across platforms. The hash value should
+// never be saved to disk.
+// It's sufficient however, to use as an in-memory hashtable.
+uint32_t RuntimeHash32(const char* data,
+                       int length,
+                       uint32_t prev_hash = kDefaultSeed);
+
+}  // namespace nb
+
+#endif  // NB_HASH_H_
diff --git a/src/nb/memory_scope.cc b/src/nb/memory_scope.cc
new file mode 100644
index 0000000..f475a16
--- /dev/null
+++ b/src/nb/memory_scope.cc
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2016 Google Inc. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "nb/memory_scope.h"
+#include "starboard/log.h"
+#include "starboard/atomic.h"
+
+// Currently installed reporter; NULL means scope reporting is disabled.
+// NOTE(review): non-static, so this symbol has external linkage — confirm
+// no other translation unit relies on it before narrowing it.
+NbMemoryScopeReporter* s_memory_reporter_ = NULL;
+
+// Installs |reporter| as the global memory-scope reporter. Returns false
+// when the build cannot support memory tracking (the pointer is still
+// stored, but push/pop below compile to no-ops in that configuration).
+bool NbSetMemoryScopeReporter(NbMemoryScopeReporter* reporter) {
+  // Flush all the pending memory writes out to main memory so that
+  // other threads see a fully constructed reporter.
+  SbAtomicMemoryBarrier();
+  s_memory_reporter_ = reporter;
+#if !defined(STARBOARD_ALLOWS_MEMORY_TRACKING)
+  SbLogRaw("\nMemory Scope Reporting is disabled because this build does "
+           "not support it. Try a QA, devel or debug build.\n");
+  return false;
+#else
+  return true;
+#endif
+}
+
+// Forwards a scope push to the installed reporter, if any. Compiled to a
+// no-op when the build does not allow memory tracking.
+void NbPushMemoryScope(NbMemoryScopeInfo* memory_scope_info) {
+#if !defined(STARBOARD_ALLOWS_MEMORY_TRACKING)
+  return;
+#else
+  // SB_LIKELY: with no reporter installed (the common case) the early-out
+  // is the hot path.
+  if (SB_LIKELY(!s_memory_reporter_)) {
+    return;
+  }
+  s_memory_reporter_->push_memory_scope_cb(
+    s_memory_reporter_->context,
+    memory_scope_info);
+#endif
+}
+
+// Forwards a scope pop to the installed reporter, if any. Must balance a
+// prior NbPushMemoryScope(). No-op when memory tracking is unsupported.
+void NbPopMemoryScope() {
+#if !defined(STARBOARD_ALLOWS_MEMORY_TRACKING)
+  return;
+#else
+  if (SB_LIKELY(!s_memory_reporter_)) {
+    return;
+  }
+  s_memory_reporter_->pop_memory_scope_cb(s_memory_reporter_->context);
+#endif
+}
diff --git a/src/nb/memory_scope.h b/src/nb/memory_scope.h
new file mode 100644
index 0000000..9c9446b
--- /dev/null
+++ b/src/nb/memory_scope.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright 2016 Google Inc. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef NB_MEMORY_SCOPE_H_
+#define NB_MEMORY_SCOPE_H_
+
+#include "starboard/types.h"
+
+///////////////////////////////////////////////////////////////////////////////
+// Macros to define the memory scope objects. These are objects that are used
+// to annotate sections of the code base as belonging to a particular memory
+// scope. Note that this is an annotation only and does not allocate memory.
+///////////////////////////////////////////////////////////////////////////////
+
+// Macro to track the memory scope inside a function or block of code. The
+// memory scope is in effect until the end of the code block.
+// Example:
+//   void Foo() {
+//     TRACK_MEMORY_SCOPE("FooMemoryScope");
+//     // pops the memory scope at the end.
+//   }
+
+#if !defined(__cplusplus)
+// Disallow macro use for non-C++ builds.
+#define TRACK_MEMORY_SCOPE(STR) error_forbidden_in_non_c_plus_plus_code
+#define TRACK_MEMORY_SCOPE_DYNAMIC(STR) error_forbidden_in_non_c_plus_plus_code
+#elif defined(STARBOARD_ALLOWS_MEMORY_TRACKING)
+#define TRACK_MEMORY_SCOPE(STR) TRACK_MEMORY_STATIC_CACHED(STR)
+#define TRACK_MEMORY_SCOPE_DYNAMIC(STR) TRACK_MEMORY_STATIC_NOT_CACHED(STR)
+#else
+// No-op when starboard does not allow memory tracking.
+#define TRACK_MEMORY_SCOPE(STR)
+#define TRACK_MEMORY_SCOPE_DYNAMIC(STR)
+#endif
+
+// The extra macro indirection below ensures that __FILE__, __LINE__ and
+// __FUNCTION__ are expanded at the invocation site before substitution.
+#define TRACK_MEMORY_STATIC_CACHED(STR) \
+  TRACK_MEMORY_STATIC_CACHED_IMPL_2(STR, __FILE__, __LINE__, __FUNCTION__)
+
+#define TRACK_MEMORY_STATIC_NOT_CACHED(STR) \
+  TRACK_MEMORY_STATIC_NOT_CACHED_IMPL_2(STR, __FILE__, __LINE__, __FUNCTION__)
+
+// NOTE(review): ## suppresses expansion of LineNum, so the identifiers below
+// paste literally as memory_scope_handle___LINE__ — one use per scope only.
+#define TRACK_MEMORY_STATIC_CACHED_IMPL_2(Str, FileStr, LineNum, FuncStr) \
+  static NbMemoryScopeInfo memory_scope_handle_##LineNum =                \
+      { 0, Str, FileStr, LineNum, FuncStr, true };                        \
+  NbPushMemoryScope(&memory_scope_handle_##LineNum);                      \
+  NbPopMemoryScopeOnScopeEnd pop_on_scope_end_##LineNum;
+
+#define TRACK_MEMORY_STATIC_NOT_CACHED_IMPL_2(Str, FileStr, LineNum, FuncStr) \
+  NbMemoryScopeInfo memory_scope_handle_##LineNum = {                         \
+      0, Str, FileStr, LineNum, FuncStr, false};                              \
+  NbPushMemoryScope(&memory_scope_handle_##LineNum);                          \
+  NbPopMemoryScopeOnScopeEnd pop_on_scope_end_##LineNum;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct NbMemoryScopeReporter;
+struct NbMemoryScopeInfo;
+
+// Sets the memory reporter. Returns true on success, false if memory
+// tracking is not available in this build.
+bool NbSetMemoryScopeReporter(NbMemoryScopeReporter* reporter);
+
+// Note that we pass by pointer because the memory scope contains a
+// variable allowing the result to be cached.
+void NbPushMemoryScope(NbMemoryScopeInfo* memory_scope);
+void NbPopMemoryScope();
+
+///////////////////////////////////////////////////////////////////////////////
+// Implementation
+///////////////////////////////////////////////////////////////////////////////
+// Interface for handling memory scopes.
+typedef void (*NbReportPushMemoryScopeCallback)(void* context,
+                                                NbMemoryScopeInfo* info);
+typedef void (*NbReportPopMemoryScopeCallback)(void* context);
+
+struct NbMemoryScopeReporter {
+  // Callback to report pushing of memory scope.
+  NbReportPushMemoryScopeCallback push_memory_scope_cb;
+
+  // Callback to report popping of the memory scope.
+  NbReportPopMemoryScopeCallback pop_memory_scope_cb;
+
+  // Optional, is passed to the callbacks as first argument.
+  void* context;
+};
+
+// This MemoryScope must remain a POD data type so that it can be statically
+// initialized.
+struct NbMemoryScopeInfo {
+  // cached_handle_ allows a cached result of the fields represented in
+  // this struct to be generated and the handle be placed into this field.
+  // See also allows_caching_.
+  uintptr_t cached_handle_;
+
+  // Represents the name of the memory scope. I.E. "Javascript" or "Gfx".
+  const char* memory_scope_name_;
+
+  // Represents the file name that this memory scope was created at.
+  const char* file_name_;
+
+  // Represents the line number that this memory scope was created at.
+  int line_number_;
+
+  // Represents the function name that this memory scope was created at.
+  const char* function_name_;
+
+  // When true, if cached_handle_ is 0 then an object may be created that
+  // represents the fields of this object. The handle that represents this
+  // cached object is then placed in cached_handle_.
+  const bool allows_caching_;
+};
+
+// NbPopMemoryScopeOnScopeEnd is only allowed for C++ builds.
+#ifdef __cplusplus
+// A helper that pops the memory scope at the end of the current code block.
+struct NbPopMemoryScopeOnScopeEnd {
+  ~NbPopMemoryScopeOnScopeEnd() { NbPopMemoryScope(); }
+};
+#endif
+
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#endif  // NB_MEMORY_SCOPE_H_
diff --git a/src/nb/memory_scope_test.cc b/src/nb/memory_scope_test.cc
new file mode 100644
index 0000000..e734c2f
--- /dev/null
+++ b/src/nb/memory_scope_test.cc
@@ -0,0 +1,257 @@
+/*

+ * Copyright 2016 Google Inc. All Rights Reserved.

+ *

+ * Licensed under the Apache License, Version 2.0 (the "License");

+ * you may not use this file except in compliance with the License.

+ * You may obtain a copy of the License at

+ *

+ *     http://www.apache.org/licenses/LICENSE-2.0

+ *

+ * Unless required by applicable law or agreed to in writing, software

+ * distributed under the License is distributed on an "AS IS" BASIS,

+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ * See the License for the specific language governing permissions and

+ * limitations under the License.

+ */

+

+#include "nb/memory_scope.h"
+
+#include <vector>
+
+#include "nb/thread_local_object.h"
+#include "starboard/mutex.h"
+
+#include "testing/gtest/include/gtest/gtest.h"

+

+namespace nb {

+namespace {

+

+bool StarboardAllowsMemoryTracking() {

+#if defined(STARBOARD_ALLOWS_MEMORY_TRACKING)

+  return true;

+#else

+  return false;

+#endif

+}

+

+// This is a memory scope reporter that is compatible

+// with the MemoryScopeReporter.

+class TestMemoryScopeReporter {

+ public:

+  typedef std::vector<NbMemoryScopeInfo*> MemoryScopeVector;

+

+  TestMemoryScopeReporter() {

+    memory_scope_reporter_ = CreateMemoryScopeReporter();

+  }

+

+  NbMemoryScopeReporter* memory_scope_reporter() {

+    return &memory_scope_reporter_;

+  }

+

+  MemoryScopeVector* stack_thread_local() { return stack_tlo_.GetOrCreate(); }

+

+  void OnPushMemoryScope(NbMemoryScopeInfo* memory_scope) {

+    stack_thread_local()->push_back(memory_scope);

+  }

+

+  void OnPopMemoryScope() {

+    MemoryScopeVector* stack = stack_thread_local();

+    if (!stack->empty()) {

+      stack->pop_back();

+    } else {

+      ADD_FAILURE_AT(__FILE__, __LINE__)

+          << " stack was empty and could not be popped.";

+    }

+  }

+

+ private:

+  static void OnPushMemoryScopeCallback(void* context,

+                                        NbMemoryScopeInfo* info) {

+    TestMemoryScopeReporter* t = static_cast<TestMemoryScopeReporter*>(context);

+    t->OnPushMemoryScope(info);

+  }

+

+  static void OnPopMemoryScopeCallback(void* context) {

+    TestMemoryScopeReporter* t = static_cast<TestMemoryScopeReporter*>(context);

+    t->OnPopMemoryScope();

+  }

+

+  NbMemoryScopeReporter CreateMemoryScopeReporter() {

+    NbMemoryScopeReporter reporter = {OnPushMemoryScopeCallback,

+                                      OnPopMemoryScopeCallback, this};

+    return reporter;

+  }

+

+  NbMemoryScopeReporter memory_scope_reporter_;

+  ThreadLocalObject<MemoryScopeVector> stack_tlo_;

+};

+

+// A test framework for testing the Pushing & popping memory scopes.

+// The key feature here is that reporter is setup on the first test

+// instance and torn down after the last test has run.

+class MemoryScopeReportingTest : public ::testing::Test {

+ public:

+  TestMemoryScopeReporter* test_memory_reporter() { return s_reporter_; }

+

+  bool reporting_enabled() const { return s_reporter_enabled_; }

+

+ protected:

+  static void SetUpTestCase() {

+    if (!s_reporter_) {

+      s_reporter_ = new TestMemoryScopeReporter;

+    }

+    s_reporter_enabled_ =

+        NbSetMemoryScopeReporter(s_reporter_->memory_scope_reporter());

+

+    EXPECT_EQ(StarboardAllowsMemoryTracking(), s_reporter_enabled_)

+        << "Expected the memory scope reporter to be enabled whenever "

+           "starboard memory tracking is allowed.";

+  }

+

+  static void TearDownTestCase() {

+    // The reporter itself is not deleted because other threads could
+    // be traversing through its data structures. It's better just to leave

+    // the object alive for the purposes of this unit test and set the pointer

+    // to NULL.

+    // This is done in order to make the MemoryScopeReport object lock free.

+    // This increases performance and reduces complexity of design.

+    NbSetMemoryScopeReporter(NULL);

+  }

+

+  // Per test setup.

+  virtual void SetUp() {

+    test_memory_reporter()->stack_thread_local()->clear();

+  }

+

+  static TestMemoryScopeReporter* s_reporter_;

+  static bool s_reporter_enabled_;

+};

+TestMemoryScopeReporter* MemoryScopeReportingTest::s_reporter_ = NULL;

+bool MemoryScopeReportingTest::s_reporter_enabled_;

+

+///////////////////////////////////////////////////////////////////////////////

+// TESTS.

+// There are two sets of tests: POSITIVE and NEGATIVE.

+//  The positive tests are active when STARBOARD_ALLOWS_MEMORY_TRACKING is

+//  defined and test that memory tracking is enabled.

+//  NEGATIVE tests ensure that tracking is disabled when

+//  STARBOARD_ALLOWS_MEMORY_TRACKING is not defined.

+// When adding new tests:

+//  POSITIVE tests are named normally.

+//  NEGATIVE tests are named with "No" prefixed to the beginning.

+//  Example:

+//   TEST_F(MemoryScopeReportingTest, PushPop) <--- POSITIVE test.

+//   TEST_F(MemoryScopeReportingTest, NoPushPop) <- NEGATIVE test.

+//  All positive & negative tests are grouped together.

+///////////////////////////////////////////////////////////////////////////////

+

+#if defined(STARBOARD_ALLOWS_MEMORY_TRACKING)

+// These are POSITIVE tests, which test the expectation that when the define

+// STARBOARD_ALLOWS_MEMORY_TRACKING is active that the memory scope reporter

+// will receive memory scope notifications.

+

+// Tests that NbPushMemoryScope and NbPopMemoryScope forward notifications
+// to the registered memory scope reporter.

+TEST_F(MemoryScopeReportingTest, PushPop) {

+  ASSERT_TRUE(reporting_enabled());

+  const int line_number = __LINE__;

+  const char* file_name = __FILE__;

+  const char* function_name = __FUNCTION__;

+  NbMemoryScopeInfo info = {0,              // Cached value (null).

+                            "Javascript",   // Name of the memory scope.

+                            file_name,      // Filename that invoked this.

+                            line_number,    // Line number.

+                            function_name,  // Function name.

+                            true};          // true allows caching.

+

+  NbPushMemoryScope(&info);

+

+  ASSERT_FALSE(test_memory_reporter()->stack_thread_local()->empty());

+  NbMemoryScopeInfo* info_ptr =

+      test_memory_reporter()->stack_thread_local()->front();

+

+  EXPECT_EQ(&info, info_ptr);

+  EXPECT_STREQ(info.file_name_, file_name);

+  EXPECT_STREQ(info.function_name_, function_name);

+  EXPECT_EQ(info.line_number_, line_number);

+

+  NbPopMemoryScope();

+  EXPECT_TRUE(test_memory_reporter()->stack_thread_local()->empty());

+}

+

+// Tests the expectation that the memory reporting macros will

+// push/pop memory regions and will also correctly bind to the

+// file, linenumber and also the function name.

+TEST_F(MemoryScopeReportingTest, Macros) {

+  ASSERT_TRUE(reporting_enabled());

+  // There should be no leftover stack objects.

+  EXPECT_TRUE(test_memory_reporter()->stack_thread_local()->empty());

+  {

+    const int line_before = __LINE__;

+    TRACK_MEMORY_SCOPE("TestMemoryScope");

+    const int predicted_line = line_before + 1;

+

+    NbMemoryScopeInfo* info_ptr =

+        test_memory_reporter()->stack_thread_local()->front();

+

+    // TRACK_MEMORY_SCOPE is defined to allow caching.

+    EXPECT_EQ(true, info_ptr->allows_caching_);

+

+    // The cached_handle_ is not mutated by TestMemoryScopeReporter so

+    // therefore it should be the default value of 0.

+    EXPECT_EQ(0, info_ptr->cached_handle_);

+

+    EXPECT_STREQ("TestMemoryScope", info_ptr->memory_scope_name_);

+    EXPECT_STREQ(__FILE__, info_ptr->file_name_);

+    EXPECT_EQ(predicted_line, info_ptr->line_number_);

+    EXPECT_STREQ(__FUNCTION__, info_ptr->function_name_);

+  }

+  // Expect that the stack object is now empty again.

+  EXPECT_TRUE(test_memory_reporter()->stack_thread_local()->empty());

+}

+

+#else  // !defined(STARBOARD_ALLOWS_MEMORY_TRACKING)

+// These are NEGATIVE tests, which test the expectation that when the

+// STARBOARD_ALLOWS_MEMORY_TRACKING is undefined that the memory scope reporter

+// does not receive memory scope notifications.

+

+// Tests the expectation that push/pop does not send notifications to the
+// reporter when memory tracking is compiled out of the build.
+TEST_F(MemoryScopeReportingTest, NoPushPop) {
+  ASSERT_FALSE(reporting_enabled());
+  const int line_number = __LINE__;
+  const char* file_name = __FILE__;
+  const char* function_name = __FUNCTION__;
+  NbMemoryScopeInfo info = {0,              // Cached value (null).
+                            "Javascript",   // Name of the memory scope.
+                            file_name,      // Filename that invoked this.
+                            line_number,    // Line number.
+                            function_name,  // Function name.
+                            true};          // true allows caching.
+
+  NbPushMemoryScope(&info);
+
+  // NbPushMemoryScope() compiles to a no-op in this configuration, so the
+  // reporter must not have received any push notification.
+  EXPECT_TRUE(test_memory_reporter()->stack_thread_local()->empty());
+
+  // NbPopMemoryScope() must likewise be a no-op. If it erroneously
+  // forwarded the call, OnPopMemoryScope() would record a test failure
+  // when popping the empty stack.
+  NbPopMemoryScope();
+
+  // Still empty: no notifications of any kind were delivered.
+  EXPECT_TRUE(test_memory_reporter()->stack_thread_local()->empty());
+}

+

+// Tests the expectation that the memory reporting macros are disabled when

+// memory tracking is not allowed.

+TEST_F(MemoryScopeReportingTest, NoMacros) {

+  ASSERT_FALSE(reporting_enabled());

+  // Test that the macros do nothing when memory reporting has been

+  // disabled.

+  TRACK_MEMORY_SCOPE("InternalMemoryRegion");

+  ASSERT_TRUE(test_memory_reporter()->stack_thread_local()->empty())

+      << "Memory reporting received notifications when it should be disabled.";

+}

+#endif

+

+}  // namespace.

+}  // namespace nb.

diff --git a/src/nb/nb.gyp b/src/nb/nb.gyp
index f87d42a..855c800 100644
--- a/src/nb/nb.gyp
+++ b/src/nb/nb.gyp
@@ -24,11 +24,21 @@
         'allocator.h',
         'allocator_decorator.cc',
         'allocator_decorator.h',
+        'analytics/memory_tracker.cc',
+        'analytics/memory_tracker.h',
+        'analytics/memory_tracker_impl.cc',
+        'analytics/memory_tracker_impl.h',
+        'analytics/memory_tracker_helpers.cc',
+        'analytics/memory_tracker_helpers.h',
         'atomic.h',
         'fixed_no_free_allocator.cc',
         'fixed_no_free_allocator.h',
+        'hash.cc',
+        'hash.h',
         'memory_pool.cc',
         'memory_pool.h',
+        'memory_scope.cc',
+        'memory_scope.h',
         'move.h',
         'pointer_arithmetic.h',
         'rect.h',
@@ -62,8 +72,12 @@
       'target_name': 'nb_test',
       'type': '<(gtest_target_type)',
       'sources': [
+        'analytics/memory_tracker_helpers_test.cc',
+        'analytics/memory_tracker_impl_test.cc',
+        'analytics/memory_tracker_test.cc',
         'atomic_test.cc',
         'fixed_no_free_allocator_test.cc',
+        'memory_scope_test.cc',
         'reuse_allocator_test.cc',
         'run_all_unittests.cc',
         'test_thread.h',