Everywhere: Replace a bundle of dbg with dbgln.

These changes are arbitrarily divided into multiple commits to make it
easier to find potentially introduced bugs with git bisect.

This commit touches some dbg() calls which are enclosed in macros. This
should be fine: with the new constexpr flags, the enclosed debug code is
always compiled and type-checked, even when the flag is off.
asynts 2021-01-12 22:30:52 +01:00 committed by Andreas Kling
parent adbb8d62d1
commit 94bb544c33
6 changed files with 154 additions and 103 deletions
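
The pattern applied everywhere is the same; the example below is lifted from the Process constructor hunk further down (the debug_process flag comes from the new AK/Debug.h):

Before:
#ifdef PROCESS_DEBUG
    dbg() << "Created new process " << m_name << "(" << m_pid.value() << ")";
#endif

After:
dbgln<debug_process>("Created new process {}({})", m_name, m_pid.value());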

AK/Debug.h Normal file
View file

@ -0,0 +1,71 @@
/*
* Copyright (c) 2020, the SerenityOS developers.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#pragma once
// FIXME: We could generate this file with CMake and configure.
#ifdef PROCESS_DEBUG
constexpr bool debug_process = true;
#else
constexpr bool debug_process = false;
#endif
#ifdef SCHEDULER_DEBUG
constexpr bool debug_scheduler = true;
#else
constexpr bool debug_scheduler = false;
#endif
#ifdef SCHEDULER_RUNNABLE_DEBUG
constexpr bool debug_scheduler_runnable = true;
#else
constexpr bool debug_scheduler_runnable = false;
#endif
#ifdef SHARED_BUFFER_DEBUG
constexpr bool debug_shared_buffer = true;
#else
constexpr bool debug_shared_buffer = false;
#endif
#ifdef THREAD_DEBUG
constexpr bool debug_thread = true;
#else
constexpr bool debug_thread = false;
#endif
#ifdef LOCK_DEBUG
constexpr bool debug_lock = true;
#else
constexpr bool debug_lock = false;
#endif
#ifdef SIGNAL_DEBUG
constexpr bool debug_signal = true;
#else
constexpr bool debug_signal = false;
#endif
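
A minimal sketch of why this arrangement keeps debug-only code compiling (hypothetical flag and function names, not part of this commit; dbgln is assumed to come from AK/Format.h as elsewhere in the tree): outside of templates the discarded branch of an if constexpr is still type-checked, and the arguments of a dbgln<flag>(...) call are checked regardless of the flag's value, whereas code behind an undefined #ifdef is never seen by the compiler at all.

#include <AK/Format.h>

constexpr bool debug_example = false; // stand-in for one of the flags above

void log_value_old_style(int value)
{
#ifdef EXAMPLE_DEBUG
    // Unless EXAMPLE_DEBUG is defined somewhere, the compiler never sees this
    // line, so a typo in it goes unnoticed until somebody turns the macro on.
    // (It would also need <AK/LogStream.h> for dbg().)
    dbg() << "value is " << value;
#endif
}

void log_value_new_style(int value)
{
    // The discarded branch is still parsed and type-checked on every build;
    // misspelling `value` here would fail even with debug_example == false.
    if constexpr (debug_example)
        dbgln("value is {}", value);

    // Equivalent template form used throughout this commit:
    dbgln<debug_example>("value is {}", value);
}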

Kernel/Process.cpp
View file

@ -24,6 +24,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <AK/Debug.h>
#include <AK/Demangle.h>
#include <AK/QuickSort.h>
#include <AK/StdLibExtras.h>
@ -355,9 +356,7 @@ Process::Process(RefPtr<Thread>& first_thread, const String& name, uid_t uid, gi
, m_ppid(ppid)
, m_wait_block_condition(*this)
{
#ifdef PROCESS_DEBUG
dbg() << "Created new process " << m_name << "(" << m_pid.value() << ")";
#endif
dbgln<debug_process>("Created new process {}({})", m_name, m_pid.value());
m_page_directory = PageDirectory::create_for_userspace(*this, fork_parent ? &fork_parent->page_directory().range_allocator() : nullptr);
@ -620,9 +619,8 @@ bool Process::dump_perfcore()
void Process::finalize()
{
ASSERT(Thread::current() == g_finalizer);
#ifdef PROCESS_DEBUG
dbg() << "Finalizing process " << *this;
#endif
dbgln<debug_process>("Finalizing process {}", *this);
if (is_dumpable()) {
if (m_should_dump_core)

Kernel/Scheduler.cpp
View file

@ -24,6 +24,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <AK/Debug.h>
#include <AK/QuickSort.h>
#include <AK/ScopeGuard.h>
#include <AK/TemporaryChange.h>
@ -131,28 +132,46 @@ bool Scheduler::pick_next()
// no longer want to schedule this thread. We can't wait until
// Scheduler::enter_current because we don't want to allow it to
// transition back to user mode.
#ifdef SCHEDULER_DEBUG
dbg() << "Scheduler[" << Processor::current().id() << "]: Thread " << *current_thread << " is dying";
#endif
if constexpr (debug_scheduler)
dbgln("Scheduler[{}]: Thread {} is dying", Processor::current().id(), *current_thread);
current_thread->set_state(Thread::Dying);
}
#ifdef SCHEDULER_RUNNABLE_DEBUG
dbg() << "Scheduler[" << Processor::current().id() << "]: Non-runnables:";
Scheduler::for_each_nonrunnable([&](Thread& thread) -> IterationDecision {
if (thread.state() == Thread::Dying)
dbg() << " " << String::format("%-12s", thread.state_string()) << " " << thread << " @ " << String::formatted("{:04x}:{:08x}", thread.tss().cs, thread.tss().eip) << " Finalizable: " << thread.is_finalizable();
else
dbg() << " " << String::format("%-12s", thread.state_string()) << " " << thread << " @ " << String::formatted("{:04x}:{:08x}", thread.tss().cs, thread.tss().eip);
return IterationDecision::Continue;
});
dbg() << "Scheduler[" << Processor::current().id() << "]: Runnables:";
Scheduler::for_each_runnable([](Thread& thread) -> IterationDecision {
dbg() << " " << String::format("%3u", thread.effective_priority()) << "/" << String::format("%2u", thread.priority()) << " " << String::format("%-12s", thread.state_string()) << " " << thread << " @ " << String::formatted("{:04x}:{:08x}", thread.tss().cs, thread.tss().eip);
return IterationDecision::Continue;
});
#endif
if constexpr (debug_scheduler_runnable) {
dbgln("Scheduler[{}]: Non-runnables:", Processor::current().id());
Scheduler::for_each_nonrunnable([&](Thread& thread) -> IterationDecision {
if (thread.state() == Thread::Dying) {
dbgln(" {:12} {} @ {:04x}:{:08x} Finalizable: {}",
thread.state_string(),
thread,
thread.tss().cs,
thread.tss().eip,
thread.is_finalizable());
} else {
dbgln(" {:12} {} @ {:04x}:{:08x}",
thread.state_string(),
thread,
thread.tss().cs,
thread.tss().eip);
}
return IterationDecision::Continue;
});
dbgln("Scheduler[{}]: Runnables:", Processor::current().id());
Scheduler::for_each_runnable([](Thread& thread) -> IterationDecision {
dbgln(" {:3}/{:2} {:12} @ {:04x}:{:08x}",
thread.effective_priority(),
thread.priority(),
thread.state_string(),
thread.tss().cs,
thread.tss().eip);
return IterationDecision::Continue;
});
}
Thread* thread_to_schedule = nullptr;
@ -181,9 +200,7 @@ bool Scheduler::pick_next()
// but since we're still holding the scheduler lock we're still in a critical section
critical.leave();
#ifdef SCHEDULER_DEBUG
dbg() << "Processing pending donate to " << *thread_to_schedule << " reason=" << reason;
#endif
dbgln<debug_scheduler>("Processing pending donate to {} reason={}", *thread_to_schedule, reason);
return donate_to_and_switch(thread_to_schedule, reason);
}
@ -211,9 +228,12 @@ bool Scheduler::pick_next()
if (!thread_to_schedule)
thread_to_schedule = Processor::current().idle_thread();
#ifdef SCHEDULER_DEBUG
dbg() << "Scheduler[" << Processor::current().id() << "]: Switch to " << *thread_to_schedule << " @ " << String::format("%04x:%08x", thread_to_schedule->tss().cs, thread_to_schedule->tss().eip);
#endif
if constexpr (debug_scheduler) {
dbgln("Scheduler[{}]: Switch to {} @ {:04x}:{:08x}",
Processor::current().id(),
*thread_to_schedule,
thread_to_schedule->tss().cs, thread_to_schedule->tss().eip);
}
// We need to leave our first critical section before switching context,
// but since we're still holding the scheduler lock we're still in a critical section
@ -234,9 +254,7 @@ bool Scheduler::yield()
scheduler_data.m_pending_donate_reason = nullptr;
auto current_thread = Thread::current();
#ifdef SCHEDULER_DEBUG
dbg() << "Scheduler[" << proc.id() << "]: yielding thread " << *current_thread << " in_irq: " << proc.in_irq();
#endif
dbgln<debug_scheduler>("Scheduler[{}]: yielding thread {} in_irq={}", proc.id(), *current_thread, proc.in_irq());
ASSERT(current_thread != nullptr);
if (proc.in_irq() || proc.in_critical()) {
// If we're handling an IRQ we can't switch context, or we're in
@ -248,9 +266,9 @@ bool Scheduler::yield()
if (!Scheduler::pick_next())
return false;
#ifdef SCHEDULER_DEBUG
dbg() << "Scheduler[" << Processor::current().id() << "]: yield returns to thread " << *current_thread << " in_irq: " << Processor::current().in_irq();
#endif
if constexpr (debug_scheduler)
dbgln("Scheduler[{}]: yield returns to thread {} in_irq={}", Processor::current().id(), *current_thread, Processor::current().in_irq());
return true;
}
@ -266,9 +284,7 @@ bool Scheduler::donate_to_and_switch(Thread* beneficiary, [[maybe_unused]] const
return Scheduler::yield();
unsigned ticks_to_donate = min(ticks_left - 1, time_slice_for(*beneficiary));
#ifdef SCHEDULER_DEBUG
dbg() << "Scheduler[" << proc.id() << "]: Donating " << ticks_to_donate << " ticks to " << *beneficiary << ", reason=" << reason;
#endif
dbgln<debug_scheduler>("Scheduler[{}]: Donating {} ticks to {}, reason={}", proc.id(), ticks_to_donate, *beneficiary, reason);
beneficiary->set_ticks_left(ticks_to_donate);
return Scheduler::context_switch(beneficiary);
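
As an aside, the {:04x}:{:08x} and width specifiers used in the scheduler dumps above follow the std::format-like syntax of AK::Format; a tiny stand-alone illustration (hypothetical values, not from this commit):

#include <AK/Format.h>

int main()
{
    // {:04x} prints zero-padded 4-digit hex and {:08x} 8-digit hex, so a
    // cs:eip pair such as 0x1b / 0x08048000 comes out as "001b:08048000".
    dbgln("{:04x}:{:08x}", 0x1b, 0x08048000);

    // {:3} and {:2} pad to fixed widths, which keeps the per-thread
    // priority columns in the runnable dump aligned.
    dbgln("{:3}/{:2}", 30, 7);
    return 0;
}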

Kernel/SharedBuffer.cpp
View file

@ -24,6 +24,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <AK/Debug.h>
#include <AK/Singleton.h>
#include <Kernel/Process.h>
#include <Kernel/SharedBuffer.h>
@ -155,13 +156,9 @@ void SharedBuffer::deref_for_process(Process& process)
ASSERT(m_total_refs > 0);
m_total_refs--;
if (ref.count == 0) {
#ifdef SHARED_BUFFER_DEBUG
dbg() << "Releasing shared buffer reference on " << m_shbuf_id << " of size " << size() << " by PID " << process.pid().value();
#endif
dbgln<debug_shared_buffer>("Releasing shared buffer reference on {} of size {} by PID {}", m_shbuf_id, size(), process.pid().value());
process.deallocate_region(*ref.region.unsafe_ptr()); // TODO: Region needs to be RefCounted!
#ifdef SHARED_BUFFER_DEBUG
dbg() << "Released shared buffer reference on " << m_shbuf_id << " of size " << size() << " by PID " << process.pid().value();
#endif
dbgln<debug_shared_buffer>("Released shared buffer reference on {} of size {} by PID {}", m_shbuf_id, size(), process.pid().value());
sanity_check("deref_for_process");
destroy_if_unused();
return;
@ -179,15 +176,11 @@ bool SharedBuffer::disown(ProcessID pid)
for (size_t i = 0; i < m_refs.size(); ++i) {
auto& ref = m_refs[i];
if (ref.pid == pid) {
#ifdef SHARED_BUFFER_DEBUG
dbg() << "Disowning shared buffer " << m_shbuf_id << " of size " << size() << " by PID " << pid.value();
#endif
dbgln<debug_shared_buffer>("Disowning shared buffer {} of size {} by PID {}", m_shbuf_id, size(), pid.value());
ASSERT(m_total_refs >= ref.count);
m_total_refs -= ref.count;
m_refs.unstable_take(i);
#ifdef SHARED_BUFFER_DEBUG
dbg() << "Disowned shared buffer " << m_shbuf_id << " of size " << size() << " by PID " << pid.value();
#endif
dbgln<debug_shared_buffer>("Disowned shared buffer {} of size {} by PID {}", m_shbuf_id, size(), pid.value());
destroy_if_unused();
break;
}
@ -201,9 +194,7 @@ void SharedBuffer::destroy_if_unused()
LOCKER(shared_buffers().lock());
sanity_check("destroy_if_unused");
if (m_total_refs == 0) {
#ifdef SHARED_BUFFER_DEBUG
dbg() << "Destroying unused SharedBuffer{" << this << "} id: " << m_shbuf_id;
#endif
dbgln<debug_shared_buffer>("Destroying unused SharedBuffer({}) id={}", this, m_shbuf_id);
auto count_before = shared_buffers().resource().size();
shared_buffers().resource().remove(m_shbuf_id);
ASSERT(count_before != shared_buffers().resource().size());

Kernel/SharedBuffer.h
View file

@ -26,6 +26,7 @@
#pragma once
#include <AK/Debug.h>
#include <AK/OwnPtr.h>
#include <AK/WeakPtr.h>
#include <Kernel/VM/AnonymousVMObject.h>
@ -52,16 +53,12 @@ public:
: m_shbuf_id(id)
, m_vmobject(move(vmobject))
{
#ifdef SHARED_BUFFER_DEBUG
dbg() << "Created shared buffer " << m_shbuf_id << " of size " << m_vmobject->size();
#endif
dbgln<debug_shared_buffer>("Created shared buffer {} of size {}", m_shbuf_id, size());
}
~SharedBuffer()
{
#ifdef SHARED_BUFFER_DEBUG
dbg() << "Destroyed shared buffer " << m_shbuf_id << " of size " << size();
#endif
dbgln<debug_shared_buffer>("Destroyed shared buffer {} of size {}", m_shbuf_id, size());
}
void sanity_check(const char* what);

Kernel/Thread.cpp
View file

@ -24,6 +24,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <AK/Debug.h>
#include <AK/Demangle.h>
#include <AK/ScopeGuard.h>
#include <AK/StringBuilder.h>
@ -61,9 +62,8 @@ Thread::Thread(NonnullRefPtr<Process> process)
} else {
m_tid = Process::allocate_pid().value();
}
#ifdef THREAD_DEBUG
dbg() << "Created new thread " << m_process->name() << "(" << m_process->pid().value() << ":" << m_tid.value() << ")";
#endif
if constexpr (debug_thread)
dbgln("Created new thread {}({}:{})", m_process->name(), m_process->pid().value(), m_tid.value());
set_default_signal_dispositions();
m_fpu_state = (FPUState*)kmalloc_aligned<16>(sizeof(FPUState));
reset_fpu_state();
@ -182,9 +182,7 @@ void Thread::unblock(u8 signal)
void Thread::set_should_die()
{
if (m_should_die) {
#ifdef THREAD_DEBUG
dbg() << *this << " Should already die";
#endif
dbgln("{} Should already die", *this);
return;
}
ScopedCritical critical;
@ -369,9 +367,7 @@ void Thread::finalize()
{
ScopedSpinLock lock(g_scheduler_lock);
#ifdef THREAD_DEBUG
dbg() << "Finalizing thread " << *this;
#endif
dbgln<debug_thread>("Finalizing thread {}", *this);
set_state(Thread::State::Dead);
m_join_condition.thread_finalizing();
}
@ -475,18 +471,16 @@ void Thread::send_signal(u8 signal, [[maybe_unused]] Process* sender)
// FIXME: Figure out what to do for masked signals. Should we also ignore them here?
if (should_ignore_signal(signal)) {
#ifdef SIGNAL_DEBUG
dbg() << "Signal " << signal << " was ignored by " << process();
#endif
dbgln<debug_signal>("Signal {} was ignored by {}", signal, process());
return;
}
#ifdef SIGNAL_DEBUG
if (sender)
dbg() << "Signal: " << *sender << " sent " << signal << " to " << process();
else
dbg() << "Signal: Kernel sent " << signal << " to " << process();
#endif
if constexpr (debug_signal) {
if (sender)
dbgln("Signal: {} sent {} to {}", *sender, signal, process());
else
dbgln("Signal: Kernel send {} to {}", signal, process());
}
m_pending_signals |= 1 << (signal - 1);
m_have_any_unmasked_pending_signals.store(pending_signals_for_state() & ~m_signal_mask, AK::memory_order_release);
@ -494,16 +488,12 @@ void Thread::send_signal(u8 signal, [[maybe_unused]] Process* sender)
if (m_state == Stopped) {
ScopedSpinLock lock(m_lock);
if (pending_signals_for_state()) {
#ifdef SIGNAL_DEBUG
dbg() << "Signal: Resuming stopped " << *this << " to deliver signal " << signal;
#endif
dbgln<debug_signal>("Signal: Resuming stopped {} to deliver signal {}", *this, signal);
resume_from_stopped();
}
} else {
ScopedSpinLock block_lock(m_block_lock);
#ifdef SIGNAL_DEBUG
dbg() << "Signal: Unblocking " << *this << " to deliver signal " << signal;
#endif
dbgln<debug_signal>("Signal: Unblocking {} to deliver signal {}", *this, signal);
unblock(signal);
}
}
@ -728,26 +718,20 @@ DispatchSignalResult Thread::dispatch_signal(u8 signal)
auto& process = this->process();
auto tracer = process.tracer();
if (signal == SIGSTOP || (tracer && default_signal_action(signal) == DefaultSignalAction::DumpCore)) {
#ifdef SIGNAL_DEBUG
dbg() << "signal: signal " << signal << " stopping thread " << *this;
#endif
dbgln<debug_signal>("signal: signal {} sopping thread {}", signal, *this);
set_state(State::Stopped, signal);
return DispatchSignalResult::Yield;
}
if (signal == SIGCONT) {
#ifdef SIGNAL_DEBUG
dbg() << "signal: SIGCONT resuming " << *this;
#endif
dbgln("signal: SIGCONT resuming {}", *this);
} else {
if (tracer) {
// when a thread is traced, it should be stopped whenever it receives a signal
// the tracer is notified of this by using waitpid()
// only "pending signals" from the tracer are sent to the tracee
if (!tracer->has_pending_signal(signal)) {
#ifdef SIGNAL_DEBUG
dbg() << "signal: " << signal << " stopping " << *this << " for tracer";
#endif
dbgln("signal: {} stopping {} for tracer", signal, *this);
set_state(Stopped, signal);
return DispatchSignalResult::Yield;
}
@ -897,15 +881,13 @@ void Thread::set_state(State new_state, u8 stop_signal)
if (previous_state == Invalid) {
// If we were *just* created, we may have already pending signals
if (has_unmasked_pending_signals()) {
dbgln("Dispatch pending signals to new thread {}", *this);
dbgln<debug_thread>("Dispatch pending signals to new thread {}", *this);
dispatch_one_pending_signal();
}
}
m_state = new_state;
#ifdef THREAD_DEBUG
dbg() << "Set Thread " << *this << " state to " << state_string();
#endif
dbgln<debug_thread>("Set thread {} state to {}", *this, state_string());
}
if (m_process->pid() != 0) {
@ -920,9 +902,7 @@ void Thread::set_state(State new_state, u8 stop_signal)
process.for_each_thread([&](auto& thread) {
if (&thread == this || !thread.is_stopped())
return IterationDecision::Continue;
#ifdef THREAD_DEBUG
dbg() << "Resuming peer thread " << thread;
#endif
dbgln<debug_thread>("Resuming peer thread {}", thread);
thread.resume_from_stopped();
return IterationDecision::Continue;
});
@ -938,9 +918,7 @@ void Thread::set_state(State new_state, u8 stop_signal)
process.for_each_thread([&](auto& thread) {
if (&thread == this || thread.is_stopped())
return IterationDecision::Continue;
#ifdef THREAD_DEBUG
dbg() << "Stopping peer thread " << thread;
#endif
dbgln<debug_thread>("Stopping peer thread {}", thread);
thread.set_state(Stopped, stop_signal);
return IterationDecision::Continue;
});