/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "mozilla/ThreadEventQueue.h"
#include "mozilla/EventQueue.h"

#include "LeakRefPtr.h"
#include "nsComponentManagerUtils.h"
#include "nsIThreadInternal.h"
#include "nsThreadUtils.h"
#include "nsThread.h"
#include "PrioritizedEventQueue.h"
#include "ThreadEventTarget.h"
#include "mozilla/TaskController.h"

using namespace mozilla;

template <class InnerQueueT>
class ThreadEventQueue<InnerQueueT>::NestedSink : public ThreadTargetSink {
 public:
  NestedSink(EventQueue* aQueue, ThreadEventQueue* aOwner)
      : mQueue(aQueue), mOwner(aOwner) {}

  bool PutEvent(already_AddRefed<nsIRunnable>&& aEvent,
                EventQueuePriority aPriority) final {
    return mOwner->PutEventInternal(std::move(aEvent), aPriority, this);
  }

  void Disconnect(const MutexAutoLock& aProofOfLock) final { mQueue = nullptr; }

  size_t SizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf) const {
    if (mQueue) {
      return mQueue->SizeOfIncludingThis(aMallocSizeOf);
    }
    return 0;
  }

 private:
  friend class ThreadEventQueue;

  // This is a non-owning reference. It must live at least until Disconnect is
  // called to clear it out.
  EventQueue* mQueue;

  RefPtr<ThreadEventQueue> mOwner;
};

template <class InnerQueueT>
ThreadEventQueue<InnerQueueT>::ThreadEventQueue(UniquePtr<InnerQueueT> aQueue,
                                                bool aIsMainThread)
    : mBaseQueue(std::move(aQueue)),
      mLock("ThreadEventQueue"),
      mEventsAvailable(mLock, "EventsAvail") {
  if (UseTaskController() && aIsMainThread) {
    TaskController::Get()->SetConditionVariable(&mEventsAvailable);
  }
  static_assert(std::is_base_of<AbstractEventQueue, InnerQueueT>::value,
                "InnerQueueT must be an AbstractEventQueue subclass");
}

template <class InnerQueueT>
ThreadEventQueue<InnerQueueT>::~ThreadEventQueue() {
  MOZ_ASSERT(mNestedQueues.IsEmpty());
}

template <class InnerQueueT>
bool ThreadEventQueue<InnerQueueT>::PutEvent(
    already_AddRefed<nsIRunnable>&& aEvent, EventQueuePriority aPriority) {
  return PutEventInternal(std::move(aEvent), aPriority, nullptr);
}

template <class InnerQueueT>
bool ThreadEventQueue<InnerQueueT>::PutEventInternal(
    already_AddRefed<nsIRunnable>&& aEvent, EventQueuePriority aPriority,
    NestedSink* aSink) {
  // We want to leak the reference when we fail to dispatch it, so that
  // we won't release the event in a wrong thread.
  LeakRefPtr<nsIRunnable> event(std::move(aEvent));
  nsCOMPtr<nsIThreadObserver> obs;

  {
    // Check if the runnable wants to override the passed-in priority.
    // Do this outside the lock, so runnables implemented in JS can QI
    // (and possibly GC) outside of the lock.
    if (InnerQueueT::SupportsPrioritization) {
      auto* e = event.get();  // can't do_QueryInterface on LeakRefPtr.
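      // If the runnable implements nsIRunnablePriority, its self-reported
      // priority overrides the one the caller passed in; map it to the
      // corresponding EventQueuePriority below.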
      if (nsCOMPtr<nsIRunnablePriority> runnablePrio = do_QueryInterface(e)) {
        uint32_t prio = nsIRunnablePriority::PRIORITY_NORMAL;
        runnablePrio->GetPriority(&prio);
        if (prio == nsIRunnablePriority::PRIORITY_HIGH) {
          aPriority = EventQueuePriority::High;
        } else if (prio == nsIRunnablePriority::PRIORITY_INPUT_HIGH) {
          aPriority = EventQueuePriority::InputHigh;
        } else if (prio == nsIRunnablePriority::PRIORITY_MEDIUMHIGH) {
          aPriority = EventQueuePriority::MediumHigh;
        } else if (prio == nsIRunnablePriority::PRIORITY_DEFERRED_TIMERS) {
          aPriority = EventQueuePriority::DeferredTimers;
        } else if (prio == nsIRunnablePriority::PRIORITY_IDLE) {
          aPriority = EventQueuePriority::Idle;
        }
      }
    }

    MutexAutoLock lock(mLock);

    if (mEventsAreDoomed) {
      return false;
    }

    if (aSink) {
      if (!aSink->mQueue) {
        return false;
      }
      aSink->mQueue->PutEvent(event.take(), aPriority, lock);
    } else {
      mBaseQueue->PutEvent(event.take(), aPriority, lock);
    }

    mEventsAvailable.Notify();

    // Make sure to grab the observer before dropping the lock, otherwise the
    // event that we just placed into the queue could run and eventually delete
    // this nsThread before the calling thread is scheduled again. We would
    // then crash while trying to access a dead nsThread.
    obs = mObserver;
  }

  if (obs) {
    obs->OnDispatchedEvent();
  }

  return true;
}

template <class InnerQueueT>
already_AddRefed<nsIRunnable> ThreadEventQueue<InnerQueueT>::GetEvent(
    bool aMayWait, EventQueuePriority* aPriority,
    mozilla::TimeDuration* aLastEventDelay) {
  nsCOMPtr<nsIRunnable> event;
  bool eventIsIdleRunnable = false;
  // This will be the IdlePeriodState for the queue the event, if any, came
  // from. May be null all along.
  IdlePeriodState* idleState = nullptr;

  {
    // Scope for lock. When we are about to return, we will exit this scope
    // so we can do some work after releasing the lock but before returning.
    MutexAutoLock lock(mLock);

    for (;;) {
      const bool noNestedQueue = mNestedQueues.IsEmpty();
      if (noNestedQueue) {
        idleState = mBaseQueue->GetIdlePeriodState();
        event = mBaseQueue->GetEvent(aPriority, lock, aLastEventDelay,
                                     &eventIsIdleRunnable);
      } else {
        // We always get events from the topmost queue when there are nested
        // queues.
        MOZ_ASSERT(!mNestedQueues.LastElement().mQueue->GetIdlePeriodState());
        event = mNestedQueues.LastElement().mQueue->GetEvent(
            aPriority, lock, aLastEventDelay, &eventIsIdleRunnable);
        MOZ_ASSERT(!eventIsIdleRunnable);
      }

      if (event) {
        break;
      }

      if (idleState) {
        MOZ_ASSERT(noNestedQueue);
        if (mBaseQueue->HasIdleRunnables(lock)) {
          // We have idle runnables that we may not have gotten above because
          // our idle state is not up to date. We need to update the idle
          // state and try again. We need to temporarily release the lock
          // while we do that.
          MutexAutoUnlock unlock(mLock);
          idleState->UpdateCachedIdleDeadline(unlock);
        } else {
          // We need to notify our idle state that we're out of tasks to run.
          // This needs to be done while not holding the lock.
          MutexAutoUnlock unlock(mLock);
          idleState->RanOutOfTasks(unlock);
        }

        // When we unlocked, someone may have queued a new runnable on us. So
        // we _must_ try to get a runnable again before we start sleeping,
        // since that might be the runnable we were waiting for.
        MOZ_ASSERT(noNestedQueue == mNestedQueues.IsEmpty(),
                   "Who is pushing nested queues on us from some other "
                   "thread?");
        event = mBaseQueue->GetEvent(aPriority, lock, aLastEventDelay,
                                     &eventIsIdleRunnable);

        // Now clear the cached idle deadline, because it was specific to this
        // GetEvent() call.
        idleState->ClearCachedIdleDeadline();

        if (event) {
          break;
        }
      }

      // No runnable available. Sleep waiting for one if we're supposed to.
      // Otherwise just go ahead and return null.
      if (!aMayWait) {
        break;
      }

      AUTO_PROFILER_LABEL("ThreadEventQueue::GetEvent::Wait", IDLE);
      mEventsAvailable.Wait();
    }
  }

  if (idleState) {
    // The pending task guarantee is not needed anymore, since we just tried
    // doing GetEvent().
    idleState->ForgetPendingTaskGuarantee();

    if (event && !eventIsIdleRunnable) {
      // We don't have a MutexAutoUnlock to pass to the callee here. We
      // _could_ have one if we wanted to, simply by moving this into the same
      // scope as our MutexAutoLock and adding a MutexAutoUnlock, but then
      // we'd be doing an extra lock/unlock pair on mLock, which seems
      // uncalled-for.
      idleState->FlagNotIdle();
    }
  }

  return event.forget();
}

template <class InnerQueueT>
void ThreadEventQueue<InnerQueueT>::DidRunEvent() {
  MutexAutoLock lock(mLock);
  if (mNestedQueues.IsEmpty()) {
    mBaseQueue->DidRunEvent(lock);
    // Don't do anything else here, because that call might have temporarily
    // unlocked the lock.
  } else {
    mNestedQueues.LastElement().mQueue->DidRunEvent(lock);
  }
}

template <class InnerQueueT>
bool ThreadEventQueue<InnerQueueT>::HasPendingEvent() {
  MutexAutoLock lock(mLock);

  // We always get events from the topmost queue when there are nested queues.
  if (mNestedQueues.IsEmpty()) {
    return mBaseQueue->HasReadyEvent(lock);
  } else {
    return mNestedQueues.LastElement().mQueue->HasReadyEvent(lock);
  }
}

template <class InnerQueueT>
bool ThreadEventQueue<InnerQueueT>::HasPendingHighPriorityEvents() {
  MutexAutoLock lock(mLock);

  // We always get events from the topmost queue when there are nested queues.
  if (mNestedQueues.IsEmpty()) {
    return mBaseQueue->HasPendingHighPriorityEvents(lock);
  } else {
    return mNestedQueues.LastElement().mQueue->HasPendingHighPriorityEvents(
        lock);
  }
}

template <class InnerQueueT>
bool ThreadEventQueue<InnerQueueT>::ShutdownIfNoPendingEvents() {
  MutexAutoLock lock(mLock);
  if (mNestedQueues.IsEmpty() && mBaseQueue->IsEmpty(lock)) {
    mEventsAreDoomed = true;
    return true;
  }
  return false;
}

template <class InnerQueueT>
void ThreadEventQueue<InnerQueueT>::EnableInputEventPrioritization() {
  MutexAutoLock lock(mLock);
  mBaseQueue->EnableInputEventPrioritization(lock);
}

template <class InnerQueueT>
void ThreadEventQueue<InnerQueueT>::FlushInputEventPrioritization() {
  MutexAutoLock lock(mLock);
  mBaseQueue->FlushInputEventPrioritization(lock);
}

template <class InnerQueueT>
void ThreadEventQueue<InnerQueueT>::SuspendInputEventPrioritization() {
  MutexAutoLock lock(mLock);
  mBaseQueue->SuspendInputEventPrioritization(lock);
}

template <class InnerQueueT>
void ThreadEventQueue<InnerQueueT>::ResumeInputEventPrioritization() {
  MutexAutoLock lock(mLock);
  mBaseQueue->ResumeInputEventPrioritization(lock);
}

template <class InnerQueueT>
already_AddRefed<nsISerialEventTarget>
ThreadEventQueue<InnerQueueT>::PushEventQueue() {
  auto queue = MakeUnique<EventQueue>();
  RefPtr<NestedSink> sink = new NestedSink(queue.get(), this);
  RefPtr<ThreadEventTarget> eventTarget =
      new ThreadEventTarget(sink, NS_IsMainThread());

  MutexAutoLock lock(mLock);

  mNestedQueues.AppendElement(NestedQueueItem(std::move(queue), eventTarget));
  return eventTarget.forget();
}

template <class InnerQueueT>
void ThreadEventQueue<InnerQueueT>::PopEventQueue(nsIEventTarget* aTarget) {
  MutexAutoLock lock(mLock);

  MOZ_ASSERT(!mNestedQueues.IsEmpty());

  NestedQueueItem& item = mNestedQueues.LastElement();

  MOZ_ASSERT(aTarget == item.mEventTarget);

  // Disconnect the event target that will be popped.
  item.mEventTarget->Disconnect(lock);

  AbstractEventQueue* prevQueue =
      mNestedQueues.Length() == 1
          ? static_cast<AbstractEventQueue*>(mBaseQueue.get())
          : static_cast<AbstractEventQueue*>(
                mNestedQueues[mNestedQueues.Length() - 2].mQueue.get());

  // Move events from the old queue to the new one.
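  // Any events that were dispatched to the nested queue but never ran are
  // re-dispatched to the queue below it, so they are not silently dropped.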
  nsCOMPtr<nsIRunnable> event;
  EventQueuePriority prio;
  TimeDuration delay;
  while ((event = item.mQueue->GetEvent(&prio, lock, &delay))) {
    // Preserve the event delay accumulated so far.
    prevQueue->PutEvent(event.forget(), prio, lock, &delay);
  }

  mNestedQueues.RemoveLastElement();
}

template <class InnerQueueT>
size_t ThreadEventQueue<InnerQueueT>::SizeOfExcludingThis(
    mozilla::MallocSizeOf aMallocSizeOf) const {
  size_t n = 0;

  n += mBaseQueue->SizeOfIncludingThis(aMallocSizeOf);
  n += mNestedQueues.ShallowSizeOfExcludingThis(aMallocSizeOf);
  for (auto& queue : mNestedQueues) {
    n += queue.mEventTarget->SizeOfIncludingThis(aMallocSizeOf);
  }

  return SynchronizedEventQueue::SizeOfExcludingThis(aMallocSizeOf) + n;
}

template <class InnerQueueT>
already_AddRefed<nsIThreadObserver>
ThreadEventQueue<InnerQueueT>::GetObserver() {
  MutexAutoLock lock(mLock);
  return do_AddRef(mObserver);
}

template <class InnerQueueT>
already_AddRefed<nsIThreadObserver>
ThreadEventQueue<InnerQueueT>::GetObserverOnThread() {
  return do_AddRef(mObserver);
}

template <class InnerQueueT>
void ThreadEventQueue<InnerQueueT>::SetObserver(nsIThreadObserver* aObserver) {
  MutexAutoLock lock(mLock);
  mObserver = aObserver;
  if (UseTaskController() && NS_IsMainThread()) {
    TaskController::Get()->SetThreadObserver(aObserver);
  }
}

namespace mozilla {
template class ThreadEventQueue<EventQueue>;
template class ThreadEventQueue<PrioritizedEventQueue>;
}  // namespace mozilla
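
// Illustrative usage sketch (comment only, not compiled): roughly how a
// consumer such as nsThread might wire up a ThreadEventQueue and dispatch to
// it. The construction site and surrounding setup here are assumptions, not
// taken from this file; the calls themselves mirror the methods defined above.
//
//   RefPtr<ThreadEventQueue<EventQueue>> queue =
//       new ThreadEventQueue<EventQueue>(MakeUnique<EventQueue>(),
//                                        /* aIsMainThread */ false);
//
//   // Dispatch a runnable; PutEventInternal may still let the runnable's
//   // nsIRunnablePriority override the priority passed here.
//   nsCOMPtr<nsIRunnable> task = NS_NewRunnableFunction(
//       "Example::Task", [] { /* runs on the thread draining the queue */ });
//   queue->PutEvent(task.forget(), EventQueuePriority::Normal);
//
//   // Temporarily divert dispatch into a nested queue (e.g. while spinning a
//   // nested event loop), then pop it; leftover events are moved to the
//   // queue below rather than dropped.
//   nsCOMPtr<nsISerialEventTarget> nested = queue->PushEventQueue();
//   // ... dispatch to `nested` and run events ...
//   queue->PopEventQueue(nested);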