Commit 6ece5aec authored by scottmg, committed by Commit bot

Remove some LazyInstance use in base/

R=mark@chromium.org
TBR=jam@chromium.org
BUG=686866, 587210, 686730

Review-Url: https://codereview.chromium.org/2667513003
Cr-Commit-Position: refs/heads/master@{#447558}
parent 7b1efd5d
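
The pattern applied throughout this CL: each base::LazyInstance<T>::Leaky global becomes an accessor function holding a function-local static pointer to a heap-allocated, intentionally leaked object. C++11 guarantees that the initializer of a function-local static runs exactly once even with concurrent callers, and leaking the object preserves the ::Leaky behavior of skipping destruction at shutdown. A minimal, self-contained sketch of the rewrite follows; Foo and GetFoo() are hypothetical names used only for illustration and do not appear in the diff below.

#include <cstdio>

class Foo {
 public:
  void DoSomething() { std::printf("doing something\n"); }
};

// Before (the shape being removed by this CL):
//   base::LazyInstance<Foo>::Leaky g_foo = LAZY_INSTANCE_INITIALIZER;
//   ...
//   g_foo.Get().DoSomething();

// After: thread-safe lazy initialization via a function-local static; the
// object is deliberately never deleted, matching LazyInstance<T>::Leaky.
Foo* GetFoo() {
  static Foo* foo = new Foo();
  return foo;
}

int main() {
  GetFoo()->DoSomething();
  return 0;
}

Callers that previously wrote g_foo.Get() or g_foo.Pointer() now call GetFoo() and dereference as needed.
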
......@@ -16,7 +16,6 @@
#if defined(ARCH_CPU_ARM_FAMILY) && (defined(OS_ANDROID) || defined(OS_LINUX))
#include "base/files/file_util.h"
#include "base/lazy_instance.h"
#endif
#if defined(ARCH_CPU_X86_FAMILY)
......@@ -94,9 +93,8 @@ uint64_t _xgetbv(uint32_t xcr) {
#endif // ARCH_CPU_X86_FAMILY
#if defined(ARCH_CPU_ARM_FAMILY) && (defined(OS_ANDROID) || defined(OS_LINUX))
class LazyCpuInfoValue {
public:
LazyCpuInfoValue() {
std::string* CpuInfoBrand() {
static std::string* brand = []() {
// This function finds the value from /proc/cpuinfo under the key "model
// name" or "Processor". "model name" is used in Linux 3.8 and later (3.7
// and later for arm64) and is shown once per CPU. "Processor" is used in
......@@ -109,30 +107,23 @@ class LazyCpuInfoValue {
ReadFileToString(FilePath("/proc/cpuinfo"), &contents);
DCHECK(!contents.empty());
if (contents.empty()) {
return;
return new std::string();
}
std::istringstream iss(contents);
std::string line;
while (std::getline(iss, line)) {
if (brand_.empty() &&
(line.compare(0, strlen(kModelNamePrefix), kModelNamePrefix) == 0 ||
if ((line.compare(0, strlen(kModelNamePrefix), kModelNamePrefix) == 0 ||
line.compare(0, strlen(kProcessorPrefix), kProcessorPrefix) == 0)) {
brand_.assign(line.substr(strlen(kModelNamePrefix)));
return new std::string(line.substr(strlen(kModelNamePrefix)));
}
}
}
const std::string& brand() const { return brand_; }
private:
std::string brand_;
DISALLOW_COPY_AND_ASSIGN(LazyCpuInfoValue);
};
base::LazyInstance<LazyCpuInfoValue>::Leaky g_lazy_cpuinfo =
LAZY_INSTANCE_INITIALIZER;
return new std::string();
}();
return brand;
}
#endif // defined(ARCH_CPU_ARM_FAMILY) && (defined(OS_ANDROID) ||
// defined(OS_LINUX))
......@@ -221,7 +212,7 @@ void CPU::Initialize() {
has_non_stop_time_stamp_counter_ = (cpu_info[3] & (1 << 8)) != 0;
}
#elif defined(ARCH_CPU_ARM_FAMILY) && (defined(OS_ANDROID) || defined(OS_LINUX))
cpu_brand_.assign(g_lazy_cpuinfo.Get().brand());
cpu_brand_.assign(*CpuInfoBrand());
#endif
}
......
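
Two of the conversions (CpuInfoBrand() above and IsUser32AndGdi32Available() in win_util.cc below) need more than a bare new T(): the cached value is the result of a multi-step computation with early returns. They use a function-local static initialized by an immediately invoked lambda, so the computation still runs exactly once. A small sketch of that idiom, with hypothetical names (ComputeBrand() and GetBrand()) and nothing assumed beyond the standard library:

#include <cstdio>
#include <string>

// Stand-in for the real work, e.g. reading and parsing /proc/cpuinfo.
std::string ComputeBrand() {
  return "example-cpu";
}

const std::string& GetBrand() {
  // The lambda body is evaluated once; early returns inside it simply decide
  // what gets cached. Initialization is thread-safe, as with any other
  // function-local static, and the string is intentionally leaked.
  static std::string* brand = []() {
    std::string value = ComputeBrand();
    if (value.empty())
      return new std::string();
    return new std::string(value);
  }();
  return *brand;
}

int main() {
  std::printf("%s\n", GetBrand().c_str());
  return 0;
}
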
......@@ -12,7 +12,6 @@
#include <memory>
#include <vector>
#include "base/lazy_instance.h"
#include "base/macros.h"
#include "base/win/iat_patch_function.h"
#include "base/win/pe_image.h"
......@@ -197,13 +196,11 @@ class HandleHooks {
void AddIATPatch(HMODULE module);
void AddEATPatch();
void Unpatch();
private:
std::vector<base::win::IATPatchFunction*> hooks_;
DISALLOW_COPY_AND_ASSIGN(HandleHooks);
};
base::LazyInstance<HandleHooks> g_hooks = LAZY_INSTANCE_INITIALIZER;
void HandleHooks::AddIATPatch(HMODULE module) {
if (!module)
......@@ -232,14 +229,6 @@ void HandleHooks::AddEATPatch() {
reinterpret_cast<void**>(&g_duplicate_function));
}
void HandleHooks::Unpatch() {
for (std::vector<base::win::IATPatchFunction*>::iterator it = hooks_.begin();
it != hooks_.end(); ++it) {
(*it)->Unpatch();
delete *it;
}
}
void PatchLoadedModules(HandleHooks* hooks) {
const DWORD kSize = 256;
DWORD returned;
......@@ -259,7 +248,7 @@ void PatchLoadedModules(HandleHooks* hooks) {
} // namespace
void InstallHandleHooks() {
HandleHooks* hooks = g_hooks.Pointer();
static HandleHooks* hooks = new HandleHooks();
// Performing EAT interception first is safer in the presence of other
// threads attempting to call CloseHandle.
......@@ -267,10 +256,5 @@ void InstallHandleHooks() {
PatchLoadedModules(hooks);
}
void RemoveHandleHooks() {
// We are patching all loaded modules without forcing them to stay in memory,
// so removing patches is not safe.
}
} // namespace debug
} // namespace base
......@@ -13,9 +13,6 @@ namespace debug {
// Installs the hooks required to debug use of improper handles.
BASE_EXPORT void InstallHandleHooks();
// Removes the hooks installed by InstallHandleHooks().
BASE_EXPORT void RemoveHandleHooks();
} // namespace debug
} // namespace base
......
......@@ -4,7 +4,6 @@
#include "base/memory/memory_pressure_listener.h"
#include "base/lazy_instance.h"
#include "base/observer_list_threadsafe.h"
#include "base/trace_event/trace_event.h"
......@@ -51,8 +50,10 @@ class MemoryPressureObserver {
DISALLOW_COPY_AND_ASSIGN(MemoryPressureObserver);
};
LazyInstance<MemoryPressureObserver>::Leaky g_observer =
LAZY_INSTANCE_INITIALIZER;
MemoryPressureObserver* GetMemoryPressureObserver() {
static auto observer = new MemoryPressureObserver();
return observer;
}
subtle::Atomic32 g_notifications_suppressed = 0;
......@@ -61,7 +62,7 @@ subtle::Atomic32 g_notifications_suppressed = 0;
MemoryPressureListener::MemoryPressureListener(
const MemoryPressureListener::MemoryPressureCallback& callback)
: callback_(callback) {
g_observer.Get().AddObserver(this, false);
GetMemoryPressureObserver()->AddObserver(this, false);
}
MemoryPressureListener::MemoryPressureListener(
......@@ -70,11 +71,11 @@ MemoryPressureListener::MemoryPressureListener(
sync_memory_pressure_callback)
: callback_(callback),
sync_memory_pressure_callback_(sync_memory_pressure_callback) {
g_observer.Get().AddObserver(this, true);
GetMemoryPressureObserver()->AddObserver(this, true);
}
MemoryPressureListener::~MemoryPressureListener() {
g_observer.Get().RemoveObserver(this);
GetMemoryPressureObserver()->RemoveObserver(this);
}
void MemoryPressureListener::Notify(MemoryPressureLevel memory_pressure_level) {
......@@ -123,7 +124,7 @@ void MemoryPressureListener::DoNotifyMemoryPressure(
MemoryPressureLevel memory_pressure_level) {
DCHECK_NE(memory_pressure_level, MEMORY_PRESSURE_LEVEL_NONE);
g_observer.Get().Notify(memory_pressure_level);
GetMemoryPressureObserver()->Notify(memory_pressure_level);
}
} // namespace base
......@@ -9,7 +9,6 @@
#include "base/bind.h"
#include "base/compiler_specific.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/message_loop/message_pump_default.h"
......@@ -38,10 +37,11 @@ namespace base {
namespace {
// A lazily created thread local storage for quick access to a thread's message
// loop, if one exists. This should be safe and free of static constructors.
LazyInstance<base::ThreadLocalPointer<MessageLoop> >::Leaky lazy_tls_ptr =
LAZY_INSTANCE_INITIALIZER;
// loop, if one exists.
base::ThreadLocalPointer<MessageLoop>* GetTLSMessageLoop() {
static auto lazy_tls_ptr = new base::ThreadLocalPointer<MessageLoop>();
return lazy_tls_ptr;
}
MessageLoop::MessagePumpFactory* message_pump_for_ui_factory_ = NULL;
#if defined(OS_IOS)
......@@ -137,7 +137,7 @@ MessageLoop::~MessageLoop() {
// OK, now make it so that no one can find us.
if (current() == this)
lazy_tls_ptr.Pointer()->Set(nullptr);
GetTLSMessageLoop()->Set(nullptr);
}
// static
......@@ -145,7 +145,7 @@ MessageLoop* MessageLoop::current() {
// TODO(darin): sadly, we cannot enable this yet since people call us even
// when they have no intention of using us.
// DCHECK(loop) << "Ouch, did you forget to initialize me?";
return lazy_tls_ptr.Pointer()->Get();
return GetTLSMessageLoop()->Get();
}
// static
......@@ -338,7 +338,7 @@ void MessageLoop::BindToCurrentThread() {
pump_ = CreateMessagePumpForType(type_);
DCHECK(!current()) << "should only have one message loop per thread";
lazy_tls_ptr.Pointer()->Set(this);
GetTLSMessageLoop()->Set(this);
incoming_task_queue_->StartScheduling();
unbound_task_runner_->BindToCurrentThread();
......
......@@ -13,7 +13,6 @@
#include "base/containers/hash_tables.h"
#include "base/files/file_path.h"
#include "base/files/file_util.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/synchronization/lock.h"
#include "build/build_config.h"
......@@ -129,10 +128,9 @@ struct PathData {
}
};
static LazyInstance<PathData>::Leaky g_path_data = LAZY_INSTANCE_INITIALIZER;
static PathData* GetPathData() {
return g_path_data.Pointer();
static auto path_data = new PathData();
return path_data;
}
// Tries to find |key| in the cache. |path_data| should be locked by the caller!
......
......@@ -5,7 +5,6 @@
#include "base/threading/thread_local_storage.h"
#include "base/atomicops.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/synchronization/lock.h"
#include "build/build_config.h"
......@@ -90,9 +89,12 @@ struct TlsVectorEntry {
uint32_t version;
};
// This LazyInstance isn't needed until after we've constructed the per-thread
// TLS vector, so it's safe to use.
base::LazyInstance<base::Lock>::Leaky g_tls_metadata_lock;
// This lock isn't needed until after we've constructed the per-thread TLS
// vector, so it's safe to use.
base::Lock* GetTLSMetadataLock() {
static auto lock = new base::Lock();
return lock;
}
TlsMetadata g_tls_metadata[kThreadLocalStorageSize];
size_t g_last_assigned_slot = 0;
......@@ -182,7 +184,7 @@ void OnThreadExitInternal(TlsVectorEntry* tls_data) {
// Snapshot the TLS Metadata so we don't have to lock on every access.
TlsMetadata tls_metadata[kThreadLocalStorageSize];
{
base::AutoLock auto_lock(g_tls_metadata_lock.Get());
base::AutoLock auto_lock(*GetTLSMetadataLock());
memcpy(tls_metadata, g_tls_metadata, sizeof(g_tls_metadata));
}
......@@ -261,7 +263,7 @@ void ThreadLocalStorage::StaticSlot::Initialize(TLSDestructorFunc destructor) {
slot_ = kInvalidSlotValue;
version_ = 0;
{
base::AutoLock auto_lock(g_tls_metadata_lock.Get());
base::AutoLock auto_lock(*GetTLSMetadataLock());
for (int i = 0; i < kThreadLocalStorageSize; ++i) {
// Tracking the last assigned slot is an attempt to find the next
// available slot within one iteration. Under normal usage, slots remain
......@@ -291,7 +293,7 @@ void ThreadLocalStorage::StaticSlot::Free() {
DCHECK_NE(slot_, kInvalidSlotValue);
DCHECK_LT(slot_, kThreadLocalStorageSize);
{
base::AutoLock auto_lock(g_tls_metadata_lock.Get());
base::AutoLock auto_lock(*GetTLSMetadataLock());
g_tls_metadata[slot_].status = TlsStatus::FREE;
g_tls_metadata[slot_].destructor = nullptr;
++(g_tls_metadata[slot_].version);
......
......@@ -5,7 +5,6 @@
#include "base/threading/watchdog.h"
#include "base/compiler_specific.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/threading/platform_thread.h"
......@@ -31,7 +30,10 @@ struct StaticData {
TimeDelta last_debugged_alarm_delay;
};
LazyInstance<StaticData>::Leaky g_static_data = LAZY_INSTANCE_INITIALIZER;
StaticData* GetStaticData() {
static auto static_data = new StaticData();
return static_data;
}
} // namespace
......@@ -119,7 +121,7 @@ void Watchdog::Alarm() {
void Watchdog::ThreadDelegate::ThreadMain() {
SetThreadName();
TimeDelta remaining_duration;
StaticData* static_data = g_static_data.Pointer();
StaticData* static_data = GetStaticData();
while (1) {
AutoLock lock(watchdog_->lock_);
while (DISARMED == watchdog_->state_)
......@@ -175,7 +177,7 @@ void Watchdog::ThreadDelegate::SetThreadName() const {
// static
void Watchdog::ResetStaticData() {
StaticData* static_data = g_static_data.Pointer();
StaticData* static_data = GetStaticData();
AutoLock lock(static_data->lock);
static_data->last_debugged_alarm_time = TimeTicks();
static_data->last_debugged_alarm_delay = TimeDelta();
......
......@@ -7,7 +7,6 @@
#include "base/bind.h"
#include "base/compiler_specific.h"
#include "base/debug/leak_annotations.h"
#include "base/lazy_instance.h"
#include "base/macros.h"
#include "base/task_runner.h"
#include "base/threading/post_task_and_reply_impl.h"
......@@ -98,9 +97,6 @@ struct TaskRunnerHolder {
scoped_refptr<TaskRunner> taskrunners_[2];
};
base::LazyInstance<TaskRunnerHolder>::Leaky
g_taskrunners = LAZY_INSTANCE_INITIALIZER;
} // namespace
bool WorkerPool::PostTaskAndReply(const tracked_objects::Location& from_here,
......@@ -120,7 +116,8 @@ bool WorkerPool::PostTaskAndReply(const tracked_objects::Location& from_here,
// static
const scoped_refptr<TaskRunner>&
WorkerPool::GetTaskRunner(bool tasks_are_slow) {
return g_taskrunners.Get().taskrunners_[tasks_are_slow];
static auto task_runner_holder = new TaskRunnerHolder();
return task_runner_holder->taskrunners_[tasks_are_slow];
}
} // namespace base
......@@ -16,21 +16,23 @@ namespace base {
namespace {
base::LazyInstance<ThreadLocalBoolean>::Leaky
g_worker_pool_running_on_this_thread = LAZY_INSTANCE_INITIALIZER;
ThreadLocalBoolean* GetWorkerPoolRunningOnThisThread() {
static auto thread_local_boolean = new ThreadLocalBoolean();
return thread_local_boolean;
}
DWORD CALLBACK WorkItemCallback(void* param) {
PendingTask* pending_task = static_cast<PendingTask*>(param);
TRACE_TASK_EXECUTION("WorkerThread::ThreadMain::Run", *pending_task);
g_worker_pool_running_on_this_thread.Get().Set(true);
GetWorkerPoolRunningOnThisThread()->Set(true);
tracked_objects::TaskStopwatch stopwatch;
stopwatch.Start();
std::move(pending_task->task).Run();
stopwatch.Stop();
g_worker_pool_running_on_this_thread.Get().Set(false);
GetWorkerPoolRunningOnThisThread()->Set(false);
tracked_objects::ThreadData::TallyRunOnWorkerThreadIfTracking(
pending_task->birth_tally, pending_task->time_posted, stopwatch);
......@@ -65,7 +67,7 @@ bool WorkerPool::PostTask(const tracked_objects::Location& from_here,
// static
bool WorkerPool::RunsTasksOnCurrentThread() {
return g_worker_pool_running_on_this_thread.Get().Get();
return GetWorkerPoolRunningOnThisThread()->Get();
}
} // namespace base
......@@ -26,7 +26,6 @@
#endif
#if !defined(OS_MACOSX)
#include "base/lazy_instance.h"
#include "base/synchronization/lock.h"
#endif
......@@ -35,8 +34,10 @@ namespace {
#if !defined(OS_MACOSX)
// This prevents a crash on traversing the environment global and looking up
// the 'TZ' variable in libc. See: crbug.com/390567.
base::LazyInstance<base::Lock>::Leaky
g_sys_time_to_time_struct_lock = LAZY_INSTANCE_INITIALIZER;
base::Lock* GetSysTimeToTimeStructLock() {
static auto lock = new base::Lock();
return lock;
}
// Define a system-specific SysTime that wraps either to a time_t or
// a time64_t depending on the host system, and associated conversion.
......@@ -45,7 +46,7 @@ base::LazyInstance<base::Lock>::Leaky
typedef time64_t SysTime;
SysTime SysTimeFromTimeStruct(struct tm* timestruct, bool is_local) {
base::AutoLock locked(g_sys_time_to_time_struct_lock.Get());
base::AutoLock locked(*GetSysTimeToTimeStructLock());
if (is_local)
return mktime64(timestruct);
else
......@@ -53,7 +54,7 @@ SysTime SysTimeFromTimeStruct(struct tm* timestruct, bool is_local) {
}
void SysTimeToTimeStruct(SysTime t, struct tm* timestruct, bool is_local) {
base::AutoLock locked(g_sys_time_to_time_struct_lock.Get());
base::AutoLock locked(*GetSysTimeToTimeStructLock());
if (is_local)
localtime64_r(&t, timestruct);
else
......@@ -64,7 +65,7 @@ void SysTimeToTimeStruct(SysTime t, struct tm* timestruct, bool is_local) {
typedef time_t SysTime;
SysTime SysTimeFromTimeStruct(struct tm* timestruct, bool is_local) {
base::AutoLock locked(g_sys_time_to_time_struct_lock.Get());
base::AutoLock locked(*GetSysTimeToTimeStructLock());
if (is_local)
return mktime(timestruct);
else
......@@ -72,7 +73,7 @@ SysTime SysTimeFromTimeStruct(struct tm* timestruct, bool is_local) {
}
void SysTimeToTimeStruct(SysTime t, struct tm* timestruct, bool is_local) {
base::AutoLock locked(g_sys_time_to_time_struct_lock.Get());
base::AutoLock locked(*GetSysTimeToTimeStructLock());
if (is_local)
localtime_r(&t, timestruct);
else
......
......@@ -40,7 +40,6 @@
#include "base/atomicops.h"
#include "base/bit_cast.h"
#include "base/cpu.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/synchronization/lock.h"
#include "base/threading/platform_thread.h"
......@@ -97,8 +96,10 @@ bool g_high_res_timer_enabled = false;
// How many times the high resolution timer has been called.
uint32_t g_high_res_timer_count = 0;
// The lock to control access to the above two variables.
base::LazyInstance<base::Lock>::Leaky g_high_res_lock =
LAZY_INSTANCE_INITIALIZER;
base::Lock* GetHighResLock() {
static auto lock = new base::Lock();
return lock;
}
// Returns the current value of the performance counter.
uint64_t QPCNowRaw() {
......@@ -191,7 +192,7 @@ FILETIME Time::ToFileTime() const {
// static
void Time::EnableHighResolutionTimer(bool enable) {
base::AutoLock lock(g_high_res_lock.Get());
base::AutoLock lock(*GetHighResLock());
if (g_high_res_timer_enabled == enable)
return;
g_high_res_timer_enabled = enable;
......@@ -218,7 +219,7 @@ bool Time::ActivateHighResolutionTimer(bool activating) {
// called.
const uint32_t max = std::numeric_limits<uint32_t>::max();
base::AutoLock lock(g_high_res_lock.Get());
base::AutoLock lock(*GetHighResLock());
UINT period = g_high_res_timer_enabled ? kMinTimerIntervalHighResMs
: kMinTimerIntervalLowResMs;
if (activating) {
......@@ -237,7 +238,7 @@ bool Time::ActivateHighResolutionTimer(bool activating) {
// static
bool Time::IsHighResolutionTimerInUse() {
base::AutoLock lock(g_high_res_lock.Get());
base::AutoLock lock(*GetHighResLock());
return g_high_res_timer_enabled && g_high_res_timer_count > 0;
}
......
......@@ -13,7 +13,6 @@
#include "base/bind.h"
#include "base/command_line.h"
#include "base/debug/leak_annotations.h"
#include "base/lazy_instance.h"
#include "base/location.h"
#include "base/macros.h"
#include "base/memory/ptr_util.h"
......@@ -89,14 +88,10 @@ const int kThreadFlushTimeoutMs = 3000;
#define MAX_TRACE_EVENT_FILTERS 32
// List of TraceEventFilter objects from the most recent tracing session.
base::LazyInstance<std::vector<std::unique_ptr<TraceEventFilter>>>::Leaky
g_category_group_filters = LAZY_INSTANCE_INITIALIZER;
// The name of the current thread. This is used to decide if the current
// thread name has changed. We combine all the seen thread names into the
// output name for the thread.
LazyInstance<ThreadLocalPointer<const char>>::Leaky g_current_thread_name =
LAZY_INSTANCE_INITIALIZER;
std::vector<std::unique_ptr<TraceEventFilter>>& GetCategoryGroupFilters() {
static auto filters = new std::vector<std::unique_ptr<TraceEventFilter>>();
return *filters;
}
ThreadTicks ThreadNow() {
return ThreadTicks::IsSupported() ? ThreadTicks::Now() : ThreadTicks();
......@@ -169,8 +164,8 @@ void ForEachCategoryFilter(const unsigned char* category_group_enabled,
CategoryRegistry::GetCategoryByStatePtr(category_group_enabled);
uint32_t filter_bitmap = category->enabled_filters();
for (int index = 0; filter_bitmap != 0; filter_bitmap >>= 1, index++) {
if (filter_bitmap & 1 && g_category_group_filters.Get()[index])
filter_fn(g_category_group_filters.Get()[index].get());
if (filter_bitmap & 1 && GetCategoryGroupFilters()[index])
filter_fn(GetCategoryGroupFilters()[index].get());
}
}
......@@ -473,7 +468,7 @@ void TraceLog::UpdateCategoryState(TraceCategory* category) {
for (const auto& event_filter : enabled_event_filters_) {
if (event_filter.IsCategoryGroupEnabled(category->name())) {
state_flags |= TraceCategory::ENABLED_FOR_FILTERING;
DCHECK(g_category_group_filters.Get()[index]);
DCHECK(GetCategoryGroupFilters()[index]);
enabled_filters_bitmap |= 1 << index;
}
if (index++ >= MAX_TRACE_EVENT_FILTERS) {
......@@ -499,11 +494,11 @@ void TraceLog::CreateFiltersForTraceConfig() {
// Filters were already added and tracing could be enabled. Filters list
// cannot be changed when trace events are using them.
if (g_category_group_filters.Get().size())
if (GetCategoryGroupFilters().size())
return;
for (auto& filter_config : enabled_event_filters_) {
if (g_category_group_filters.Get().size() >= MAX_TRACE_EVENT_FILTERS) {
if (GetCategoryGroupFilters().size() >= MAX_TRACE_EVENT_FILTERS) {
NOTREACHED()
<< "Too many trace event filters installed in the current session";
break;
......@@ -522,7 +517,7 @@ void TraceLog::CreateFiltersForTraceConfig() {
new_filter = filter_factory_for_testing_(predicate_name);
CHECK(new_filter) << "Unknown trace filter " << predicate_name;
}
g_category_group_filters.Get().push_back(std::move(new_filter));
GetCategoryGroupFilters().push_back(std::move(new_filter));
}
}
......@@ -589,7 +584,7 @@ void TraceLog::SetEnabled(const TraceConfig& trace_config,
// cleared at the end of tracing because some threads which hit trace event
// when disabling, could try to use the filters.
if (!enabled_modes_)
g_category_group_filters.Get().clear();
GetCategoryGroupFilters().clear();
// Update trace config for recording.
const bool already_recording = enabled_modes_ & RECORDING_MODE;
......@@ -1220,9 +1215,9 @@ TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
// call (if any), but don't bother if the new name is empty. Note this will
// not detect a thread name change within the same char* buffer address: we
// favor common case performance over corner case correctness.
if (new_name != g_current_thread_name.Get().Get() && new_name &&
*new_name) {
g_current_thread_name.Get().Set(new_name);
static auto current_thread_name = new ThreadLocalPointer<const char>();
if (new_name != current_thread_name->Get() && new_name && *new_name) {
current_thread_name->Set(new_name);
AutoLock thread_info_lock(thread_info_lock_);
......
......@@ -11,7 +11,6 @@
#include "base/debug/alias.h"
#include "base/debug/stack_trace.h"
#include "base/hash.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/synchronization/lock_impl.h"
......@@ -43,10 +42,13 @@ struct Info {
};
typedef std::unordered_map<HANDLE, Info, HandleHash> HandleMap;
// g_lock protects the handle map and setting g_active_verifier within this
// GetLock() protects the handle map and setting g_active_verifier within this
// module.
typedef base::internal::LockImpl NativeLock;
base::LazyInstance<NativeLock>::Leaky g_lock = LAZY_INSTANCE_INITIALIZER;
NativeLock* GetLock() {
static auto native_lock = new NativeLock();
return native_lock;
}
// Simple automatic locking using a native critical section so it supports
// recursive locking.
......@@ -70,9 +72,7 @@ class AutoNativeLock {
// way to delete this object from the wrong side of it (or any side, actually).
class ActiveVerifier {
public:
explicit ActiveVerifier(bool enabled)
: enabled_(enabled), lock_(g_lock.Pointer()) {
}
explicit ActiveVerifier(bool enabled) : enabled_(enabled), lock_(GetLock()) {}
// Retrieves the current verifier.
static ActiveVerifier* Get();
......@@ -117,11 +117,11 @@ bool CloseHandleWrapper(HANDLE handle) {
return true;
}
// Assigns the g_active_verifier global within the g_lock lock.
// Assigns the g_active_verifier global within the GetLock() lock.
// If |existing_verifier| is non-null then |enabled| is ignored.
void ThreadSafeAssignOrCreateActiveVerifier(ActiveVerifier* existing_verifier,
bool enabled) {
AutoNativeLock lock(g_lock.Get());
AutoNativeLock lock(*GetLock());
// Another thread in this module might be trying to assign the global
// verifier, so check that within the lock here.
if (g_active_verifier)
......
......@@ -32,7 +32,6 @@
#include "base/base_switches.h"
#include "base/command_line.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/strings/string_util.h"
......@@ -67,44 +66,6 @@ void __cdecl ForceCrashOnSigAbort(int) {
*((volatile int*)0) = 0x1337;
}
typedef decltype(GetProcessMitigationPolicy)* GetProcessMitigationPolicyType;
class LazyIsUser32AndGdi32Available {
public:
LazyIsUser32AndGdi32Available() : value_(!IsWin32kSyscallsDisabled()) {}
~LazyIsUser32AndGdi32Available() {}
bool value() { return value_; }
private:
static bool IsWin32kSyscallsDisabled() {
// Can't disable win32k prior to windows 8.
if (base::win::GetVersion() < base::win::VERSION_WIN8)
return false;
GetProcessMitigationPolicyType get_process_mitigation_policy_func =
reinterpret_cast<GetProcessMitigationPolicyType>(GetProcAddress(
GetModuleHandle(L"kernel32.dll"), "GetProcessMitigationPolicy"));
if (!get_process_mitigation_policy_func)
return false;
PROCESS_MITIGATION_SYSTEM_CALL_DISABLE_POLICY policy = {};
if (get_process_mitigation_policy_func(GetCurrentProcess(),
ProcessSystemCallDisablePolicy,
&policy, sizeof(policy))) {
return policy.DisallowWin32kSystemCalls != 0;
}
return false;
}
const bool value_;
DISALLOW_COPY_AND_ASSIGN(LazyIsUser32AndGdi32Available);
};
// Returns the current platform role. We use the PowerDeterminePlatformRoleEx
// API for that.
POWER_PLATFORM_ROLE GetPlatformRole() {
......@@ -540,9 +501,32 @@ void SetDomainStateForTesting(bool state) {
}
bool IsUser32AndGdi32Available() {
static base::LazyInstance<LazyIsUser32AndGdi32Available>::Leaky available =
LAZY_INSTANCE_INITIALIZER;
return available.Get().value();
static auto is_user32_and_gdi32_available = []() {
// If win32k syscalls aren't disabled, then user32 and gdi32 are available.
// Can't disable win32k prior to windows 8.
if (base::win::GetVersion() < base::win::VERSION_WIN8)
return true;
typedef decltype(
GetProcessMitigationPolicy)* GetProcessMitigationPolicyType;
GetProcessMitigationPolicyType get_process_mitigation_policy_func =
reinterpret_cast<GetProcessMitigationPolicyType>(GetProcAddress(
GetModuleHandle(L"kernel32.dll"), "GetProcessMitigationPolicy"));
if (!get_process_mitigation_policy_func)
return true;
PROCESS_MITIGATION_SYSTEM_CALL_DISABLE_POLICY policy = {};
if (get_process_mitigation_policy_func(GetCurrentProcess(),
ProcessSystemCallDisablePolicy,
&policy, sizeof(policy))) {
return policy.DisallowWin32kSystemCalls == 0;
}
return true;
}();
return is_user32_and_gdi32_available;
}
bool GetLoadedModulesSnapshot(HANDLE process, std::vector<HMODULE>* snapshot) {
......
......@@ -977,10 +977,6 @@ void ChromeMainDelegate::ProcessExiting(const std::string& process_type) {
// Android doesn't use InitChromeLogging, so we close the log file manually.
logging::CloseLogFile();
#endif // !defined(OS_ANDROID)
#if defined(OS_WIN)
base::debug::RemoveHandleHooks();
#endif
}
#if defined(OS_MACOSX)
......