Merge branch 'rework/kthreads' into for-linus

commit 51889d225c
Author: Petr Mladek
Date:   2022-06-23 19:11:28 +02:00

12 changed files with 65 additions and 640 deletions

diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c

@@ -581,7 +581,6 @@ void __handle_sysrq(int key, bool check_mask)
rcu_sysrq_start();
rcu_read_lock();
printk_prefer_direct_enter();
/*
* Raise the apparent loglevel to maximum so that the sysrq header
* is shown to provide the user with positive feedback. We do not
@@ -623,7 +622,6 @@ void __handle_sysrq(int key, bool check_mask)
pr_cont("\n");
console_loglevel = orig_log_level;
}
printk_prefer_direct_exit();
rcu_read_unlock();
rcu_sysrq_end();

diff --git a/include/linux/console.h b/include/linux/console.h

@@ -16,7 +16,6 @@
#include <linux/atomic.h>
#include <linux/types.h>
#include <linux/mutex.h>
struct vc_data;
struct console_font_op;
@@ -154,22 +153,6 @@ struct console {
uint ospeed;
u64 seq;
unsigned long dropped;
struct task_struct *thread;
bool blocked;
/*
* The per-console lock is used by printing kthreads to synchronize
* this console with callers of console_lock(). This is necessary in
* order to allow printing kthreads to run in parallel to each other,
* while each safely accessing the @blocked field and synchronizing
* against direct printing via console_lock/console_unlock.
*
* Note: For synchronizing against direct printing via
* console_trylock/console_unlock, see the static global
* variable @console_kthreads_active.
*/
struct mutex lock;
void *data;
struct console *next;
};
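
The removed comment above describes a two-party protocol: console_lock() takes every per-console mutex and sets @blocked, while each kthread printer checks @blocked under its own mutex before emitting. A minimal userspace sketch of that protocol, with invented names and a pthread mutex standing in for the kernel one (not the removed kernel code):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Userspace stand-ins for the removed @lock and @blocked fields. */
struct console_sketch {
	pthread_mutex_t lock;
	bool blocked;
};

/* console_lock() side: stop this console's kthread printer. */
static void console_block(struct console_sketch *con)
{
	pthread_mutex_lock(&con->lock);
	con->blocked = true;
	pthread_mutex_unlock(&con->lock);
}

/* kthread side: emit only while not blocked, under the mutex. */
static bool kthread_try_print(struct console_sketch *con)
{
	bool printed = false;

	pthread_mutex_lock(&con->lock);
	if (!con->blocked) {
		printf("emitting one record\n");	/* stand-in for the driver call */
		printed = true;
	}
	pthread_mutex_unlock(&con->lock);
	return printed;
}

int main(void)
{
	struct console_sketch con = { PTHREAD_MUTEX_INITIALIZER, false };

	kthread_try_print(&con);	/* prints */
	console_block(&con);
	kthread_try_print(&con);	/* skips: direct printing owns the console */
	return 0;
}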

diff --git a/include/linux/printk.h b/include/linux/printk.h

@@ -169,11 +169,7 @@ extern void __printk_safe_exit(void);
#define printk_deferred_enter __printk_safe_enter
#define printk_deferred_exit __printk_safe_exit
extern void printk_prefer_direct_enter(void);
extern void printk_prefer_direct_exit(void);
extern bool pr_flush(int timeout_ms, bool reset_on_progress);
extern void try_block_console_kthreads(int timeout_ms);
/*
* Please don't use printk_ratelimit(), because it shares ratelimiting state
@@ -225,23 +221,11 @@ static inline void printk_deferred_exit(void)
{
}
static inline void printk_prefer_direct_enter(void)
{
}
static inline void printk_prefer_direct_exit(void)
{
}
static inline bool pr_flush(int timeout_ms, bool reset_on_progress)
{
return true;
}
static inline void try_block_console_kthreads(int timeout_ms)
{
}
static inline int printk_ratelimit(void)
{
return 0;
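
The enter/exit pair declared above acts as a global nesting counter that makes printk() prefer printing directly instead of waking the kthread printers. The call-site pattern, as it appears in the watchdog, panic, and reboot hunks below (a sketch, with pr_emerg() standing in for any urgent message):

	printk_prefer_direct_enter();	/* printk() now tries the consoles itself */
	pr_emerg("something urgent happened\n");
	printk_prefer_direct_exit();	/* kthread printing is preferred again */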

diff --git a/kernel/hung_task.c b/kernel/hung_task.c

@@ -127,8 +127,6 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
* complain:
*/
if (sysctl_hung_task_warnings) {
printk_prefer_direct_enter();
if (sysctl_hung_task_warnings > 0)
sysctl_hung_task_warnings--;
pr_err("INFO: task %s:%d blocked for more than %ld seconds.\n",
@@ -144,8 +142,6 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
if (sysctl_hung_task_all_cpu_backtrace)
hung_task_show_all_bt = true;
printk_prefer_direct_exit();
}
touch_nmi_watchdog();
@@ -208,17 +204,12 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
}
unlock:
rcu_read_unlock();
if (hung_task_show_lock) {
printk_prefer_direct_enter();
if (hung_task_show_lock)
debug_show_all_locks();
printk_prefer_direct_exit();
}
if (hung_task_show_all_bt) {
hung_task_show_all_bt = false;
printk_prefer_direct_enter();
trigger_all_cpu_backtrace();
printk_prefer_direct_exit();
}
if (hung_task_call_panic)

diff --git a/kernel/panic.c b/kernel/panic.c

@@ -297,7 +297,6 @@ void panic(const char *fmt, ...)
* unfortunately means it may not be hardened to work in a
* panic situation.
*/
try_block_console_kthreads(10000);
smp_send_stop();
} else {
/*
@@ -305,7 +304,6 @@
* kmsg_dump, we will need architecture dependent extra
* works in addition to stopping other CPUs.
*/
try_block_console_kthreads(10000);
crash_smp_send_stop();
}
@@ -605,8 +603,6 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
{
disable_trace_on_warning();
printk_prefer_direct_enter();
if (file)
pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS\n",
raw_smp_processor_id(), current->pid, file, line,
@@ -636,8 +632,6 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
/* Just a warning, don't kill lockdep. */
add_taint(taint, LOCKDEP_STILL_OK);
printk_prefer_direct_exit();
}
#ifndef __WARN_FLAGS

diff --git a/kernel/printk/internal.h b/kernel/printk/internal.h

@@ -20,8 +20,6 @@ enum printk_info_flags {
LOG_CONT = 8, /* text is a fragment of a continuation line */
};
extern bool block_console_kthreads;
__printf(4, 0)
int vprintk_store(int facility, int level,
const struct dev_printk_info *dev_info,

diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c

@@ -223,36 +223,6 @@ int devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write,
/* Number of registered extended console drivers. */
static int nr_ext_console_drivers;
/*
* Used to synchronize printing kthreads against direct printing via
* console_trylock/console_unlock.
*
* Values:
* -1 = console kthreads atomically blocked (via global trylock)
* 0 = no kthread printing, console not locked (via trylock)
* >0 = kthread(s) actively printing
*
* Note: For synchronizing against direct printing via
* console_lock/console_unlock, see the @lock variable in
* struct console.
*/
static atomic_t console_kthreads_active = ATOMIC_INIT(0);
#define console_kthreads_atomic_tryblock() \
(atomic_cmpxchg(&console_kthreads_active, 0, -1) == 0)
#define console_kthreads_atomic_unblock() \
atomic_cmpxchg(&console_kthreads_active, -1, 0)
#define console_kthreads_atomically_blocked() \
(atomic_read(&console_kthreads_active) == -1)
#define console_kthread_printing_tryenter() \
atomic_inc_unless_negative(&console_kthreads_active)
#define console_kthread_printing_exit() \
atomic_dec(&console_kthreads_active)
/* Block console kthreads to avoid processing new messages. */
bool block_console_kthreads;
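
The macros above encode a small counter protocol on console_kthreads_active: -1 means the kthreads are atomically blocked, 0 means idle, and positive values count kthreads that are actively printing. A runnable userspace analogue of that protocol, using C11 atomics and invented names (a sketch, not the kernel code):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int active;	/* -1 = blocked, 0 = idle, >0 = printers */

/* console_trylock() side: block all printers, but only if none is printing. */
static bool tryblock(void)
{
	int expected = 0;
	return atomic_compare_exchange_strong(&active, &expected, -1);
}

static void unblock(void)
{
	int expected = -1;
	atomic_compare_exchange_strong(&active, &expected, 0);
}

/* kthread side: mirrors atomic_inc_unless_negative(). */
static bool printing_tryenter(void)
{
	int v = atomic_load(&active);

	while (v >= 0) {
		if (atomic_compare_exchange_weak(&active, &v, v + 1))
			return true;
	}
	return false;
}

static void printing_exit(void)
{
	atomic_fetch_sub(&active, 1);
}

int main(void)
{
	printing_tryenter();					/* a kthread prints */
	printf("tryblock while printing: %d\n", tryblock());	/* 0 */
	printing_exit();
	printf("tryblock while idle: %d\n", tryblock());	/* 1 */
	printf("tryenter while blocked: %d\n", printing_tryenter()); /* 0 */
	unblock();
	return 0;
}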
/*
* Helper macros to handle lockdep when locking/unlocking console_sem. We use
* macros instead of functions so that _RET_IP_ contains useful information.
@@ -301,49 +271,14 @@ static bool panic_in_progress(void)
}
/*
* Tracks whether kthread printers are all blocked. A value of true implies
* that the console is locked via console_lock() or the console is suspended.
* Writing to this variable requires holding @console_sem.
* This is used for debugging the mess that is the VT code by
* keeping track if we have the console semaphore held. It's
* definitely not the perfect debug tool (we don't know if _WE_
* hold it and are racing, but it helps tracking those weird code
* paths in the console code where we end up in places I want
* locked without the console semaphore held).
*/
static bool console_kthreads_blocked;
/*
* Block all kthread printers from a schedulable context.
*
* Requires holding @console_sem.
*/
static void console_kthreads_block(void)
{
struct console *con;
for_each_console(con) {
mutex_lock(&con->lock);
con->blocked = true;
mutex_unlock(&con->lock);
}
console_kthreads_blocked = true;
}
/*
* Unblock all kthread printers from a schedulable context.
*
* Requires holding @console_sem.
*/
static void console_kthreads_unblock(void)
{
struct console *con;
for_each_console(con) {
mutex_lock(&con->lock);
con->blocked = false;
mutex_unlock(&con->lock);
}
console_kthreads_blocked = false;
}
static int console_suspended;
static int console_locked, console_suspended;
/*
* Array of consoles built from command line options (console=)
@@ -426,75 +361,7 @@ static int console_msg_format = MSG_FORMAT_DEFAULT;
/* syslog_lock protects syslog_* variables and write access to clear_seq. */
static DEFINE_MUTEX(syslog_lock);
/*
* A flag to signify if printk_activate_kthreads() has already started the
* kthread printers. If true, any later registered consoles must start their
* own kthread directly. The flag is write protected by the console_lock.
*/
static bool printk_kthreads_available;
#ifdef CONFIG_PRINTK
static atomic_t printk_prefer_direct = ATOMIC_INIT(0);
/**
* printk_prefer_direct_enter - cause printk() calls to attempt direct
* printing to all enabled consoles
*
* Since it is not possible to call into the console printing code from any
* context, there is no guarantee that direct printing will occur.
*
* This globally affects all printk() callers.
*
* Context: Any context.
*/
void printk_prefer_direct_enter(void)
{
atomic_inc(&printk_prefer_direct);
}
/**
* printk_prefer_direct_exit - restore printk() behavior
*
* Context: Any context.
*/
void printk_prefer_direct_exit(void)
{
WARN_ON(atomic_dec_if_positive(&printk_prefer_direct) < 0);
}
/*
* Calling printk() always wakes kthread printers so that they can
* flush the new message to their respective consoles. Also, if direct
* printing is allowed, printk() tries to flush the messages directly.
*
* Direct printing is allowed in situations when the kthreads
* are not available or the system is in a problematic state.
*
* See the implementation for details on possible races.
*/
static inline bool allow_direct_printing(void)
{
/*
* Checking kthread availability is a possible race because the
* kthread printers can become permanently disabled during runtime.
* However, doing that requires holding the console_lock, so any
* pending messages will be direct printed by console_unlock().
*/
if (!printk_kthreads_available)
return true;
/*
* Prefer direct printing when the system is in a problematic state.
* The context that sets this state will always see the updated value.
* The other contexts do not care. Anyway, direct printing is just a
* best effort. The direct output is only possible when console_lock
* is not already taken and no kthread printers are actively printing.
*/
return (system_state > SYSTEM_RUNNING ||
oops_in_progress ||
atomic_read(&printk_prefer_direct));
}
DECLARE_WAIT_QUEUE_HEAD(log_wait);
/* All 3 protected by @syslog_lock. */
/* the next printk record to read by syslog(READ) or /proc/kmsg */
@@ -2385,10 +2252,10 @@ asmlinkage int vprintk_emit(int facility, int level,
printed_len = vprintk_store(facility, level, dev_info, fmt, args);
/* If called from the scheduler, we can not call up(). */
if (!in_sched && allow_direct_printing()) {
if (!in_sched) {
/*
* The caller may be holding system-critical or
* timing-sensitive locks. Disable preemption during direct
* timing-sensitive locks. Disable preemption during
* printing of all remaining records to all consoles so that
* this context can return as soon as possible. Hopefully
* another printk() caller will take over the printing.
@@ -2431,8 +2298,6 @@ EXPORT_SYMBOL(_printk);
static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress);
static void printk_start_kthread(struct console *con);
#else /* CONFIG_PRINTK */
#define CONSOLE_LOG_MAX 0
@@ -2466,8 +2331,6 @@ static void call_console_driver(struct console *con, const char *text, size_t le
}
static bool suppress_message_printing(int level) { return false; }
static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress) { return true; }
static void printk_start_kthread(struct console *con) { }
static bool allow_direct_printing(void) { return true; }
#endif /* CONFIG_PRINTK */
@@ -2686,14 +2549,6 @@ static int console_cpu_notify(unsigned int cpu)
/* If trylock fails, someone else is doing the printing */
if (console_trylock())
console_unlock();
else {
/*
* If a new CPU comes online, the conditions for
* printer_should_wake() may have changed for some
* kthread printer with !CON_ANYTIME.
*/
wake_up_klogd();
}
}
return 0;
}
@@ -2713,7 +2568,7 @@ void console_lock(void)
down_console_sem();
if (console_suspended)
return;
console_kthreads_block();
console_locked = 1;
console_may_schedule = 1;
}
EXPORT_SYMBOL(console_lock);
@@ -2734,30 +2589,15 @@ int console_trylock(void)
up_console_sem();
return 0;
}
if (!console_kthreads_atomic_tryblock()) {
up_console_sem();
return 0;
}
console_locked = 1;
console_may_schedule = 0;
return 1;
}
EXPORT_SYMBOL(console_trylock);
/*
* This is used to help to make sure that certain paths within the VT code are
* running with the console lock held. It is definitely not the perfect debug
* tool (it is not known if the VT code is the task holding the console lock),
* but it helps tracking those weird code paths in the console code such as
* when the console is suspended: where the console is not locked but no
* console printing may occur.
*
* Note: This returns true when the console is suspended but is not locked.
* This is intentional because the VT code must consider that situation
* the same as if the console was locked.
*/
int is_console_locked(void)
{
return (console_kthreads_blocked || atomic_read(&console_kthreads_active));
return console_locked;
}
EXPORT_SYMBOL(is_console_locked);
@@ -2780,9 +2620,18 @@ static bool abandon_console_lock_in_panic(void)
return atomic_read(&panic_cpu) != raw_smp_processor_id();
}
static inline bool __console_is_usable(short flags)
/*
* Check if the given console is currently capable and allowed to print
* records.
*
* Requires the console_lock.
*/
static inline bool console_is_usable(struct console *con)
{
if (!(flags & CON_ENABLED))
if (!(con->flags & CON_ENABLED))
return false;
if (!con->write)
return false;
/*
@@ -2791,43 +2640,15 @@ static inline bool __console_is_usable(short flags)
* cope (CON_ANYTIME) don't call them until this CPU is officially up.
*/
if (!cpu_online(raw_smp_processor_id()) &&
!(flags & CON_ANYTIME))
!(con->flags & CON_ANYTIME))
return false;
return true;
}
/*
* Check if the given console is currently capable and allowed to print
* records.
*
* Requires holding the console_lock.
*/
static inline bool console_is_usable(struct console *con)
{
if (!con->write)
return false;
return __console_is_usable(con->flags);
}
static void __console_unlock(void)
{
/*
* Depending on whether console_lock() or console_trylock() was used,
* appropriately allow the kthread printers to continue.
*/
if (console_kthreads_blocked)
console_kthreads_unblock();
else
console_kthreads_atomic_unblock();
/*
* New records may have arrived while the console was locked.
* Wake the kthread printers to print them.
*/
wake_up_klogd();
console_locked = 0;
up_console_sem();
}
@@ -2845,19 +2666,17 @@ static void __console_unlock(void)
*
* @handover will be set to true if a printk waiter has taken over the
* console_lock, in which case the caller is no longer holding the
* console_lock. Otherwise it is set to false. A NULL pointer may be provided
* to prevent the console_lock from being taken over by a printk waiter.
* console_lock. Otherwise it is set to false.
*
* Returns false if the given console has no next record to print, otherwise
* true.
*
* Requires the console_lock if @handover is non-NULL.
* Requires con->lock otherwise.
* Requires the console_lock.
*/
static bool __console_emit_next_record(struct console *con, char *text, char *ext_text,
char *dropped_text, bool *handover)
static bool console_emit_next_record(struct console *con, char *text, char *ext_text,
char *dropped_text, bool *handover)
{
static atomic_t panic_console_dropped = ATOMIC_INIT(0);
static int panic_console_dropped;
struct printk_info info;
struct printk_record r;
unsigned long flags;
@@ -2866,8 +2685,7 @@ static bool __console_emit_next_record(struct console *con, char *text, char *ex
prb_rec_init_rd(&r, &info, text, CONSOLE_LOG_MAX);
if (handover)
*handover = false;
*handover = false;
if (!prb_read_valid(prb, con->seq, &r))
return false;
@@ -2875,8 +2693,7 @@ static bool __console_emit_next_record(struct console *con, char *text, char *ex
if (con->seq != r.info->seq) {
con->dropped += r.info->seq - con->seq;
con->seq = r.info->seq;
if (panic_in_progress() &&
atomic_fetch_inc_relaxed(&panic_console_dropped) > 10) {
if (panic_in_progress() && panic_console_dropped++ > 10) {
suppress_panic_printk = 1;
pr_warn_once("Too many dropped messages. Suppress messages on non-panic CPUs to prevent livelock.\n");
}
@@ -2898,61 +2715,31 @@
len = record_print_text(&r, console_msg_format & MSG_FORMAT_SYSLOG, printk_time);
}
if (handover) {
/*
* While actively printing out messages, if another printk()
* were to occur on another CPU, it may wait for this one to
* finish. This task can not be preempted if there is a
* waiter waiting to take over.
*
* Interrupts are disabled because the hand over to a waiter
* must not be interrupted until the hand over is completed
* (@console_waiter is cleared).
*/
printk_safe_enter_irqsave(flags);
console_lock_spinning_enable();
/* don't trace irqsoff print latency */
stop_critical_timings();
}
/*
* While actively printing out messages, if another printk()
* were to occur on another CPU, it may wait for this one to
* finish. This task can not be preempted if there is a
* waiter waiting to take over.
*
* Interrupts are disabled because the hand over to a waiter
* must not be interrupted until the hand over is completed
* (@console_waiter is cleared).
*/
printk_safe_enter_irqsave(flags);
console_lock_spinning_enable();
stop_critical_timings(); /* don't trace print latency */
call_console_driver(con, write_text, len, dropped_text);
start_critical_timings();
con->seq++;
if (handover) {
start_critical_timings();
*handover = console_lock_spinning_disable_and_check();
printk_safe_exit_irqrestore(flags);
}
*handover = console_lock_spinning_disable_and_check();
printk_safe_exit_irqrestore(flags);
skip:
return true;
}
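
The handover branch above wraps the driver call in console_lock_spinning_enable()/console_lock_spinning_disable_and_check(): while the owner emits a record, another printk() caller may spin instead of sleeping, and the owner passes the console_lock over between records. A stripped-down, runnable userspace analogue of that idea (invented names, simplified memory ordering, none of the kernel's printk_safe handling):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool waiter_spinning;	/* a second printk() caller wants the lock */

/* Owner, between records: pass ownership if someone is spinning. */
static bool handover_if_waiter(void)
{
	bool expected = true;
	return atomic_compare_exchange_strong(&waiter_spinning, &expected, false);
}

static void *owner(void *unused)
{
	for (long rec = 0; ; rec++) {
		/* ...emit one record to the console here... */
		if (handover_if_waiter()) {
			printf("owner: handed over after record %ld\n", rec);
			return NULL;	/* must not unlock: ownership moved */
		}
	}
}

static void *waiter(void *unused)
{
	atomic_store(&waiter_spinning, true);	/* register as the spinning waiter */
	while (atomic_load(&waiter_spinning))
		;				/* owner clears the flag on handover */
	printf("waiter: took over the console lock\n");
	return NULL;
}

int main(void)
{
	pthread_t o, w;

	pthread_create(&o, NULL, owner, NULL);
	pthread_create(&w, NULL, waiter, NULL);
	pthread_join(o, NULL);
	pthread_join(w, NULL);
	return 0;
}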
/*
* Print a record for a given console, but allow another printk() caller to
* take over the console_lock and continue printing.
*
* Requires the console_lock, but depending on @handover after the call, the
* caller may no longer have the console_lock.
*
* See __console_emit_next_record() for argument and return details.
*/
static bool console_emit_next_record_transferable(struct console *con, char *text, char *ext_text,
char *dropped_text, bool *handover)
{
/*
* Handovers are only supported if threaded printers are atomically
* blocked. The context taking over the console_lock may be atomic.
*/
if (!console_kthreads_atomically_blocked()) {
*handover = false;
handover = NULL;
}
return __console_emit_next_record(con, text, ext_text, dropped_text, handover);
}
/*
* Print out all remaining records to all consoles.
*
@@ -2971,8 +2758,8 @@ static bool console_emit_next_record_transferable(struct console *con, char *tex
* were flushed to all usable consoles. A returned false informs the caller
* that everything was not flushed (either there were no usable consoles or
* another context has taken over printing or it is a panic situation and this
* is not the panic CPU or direct printing is not preferred). Regardless of the
* reason, the caller should assume it is not useful to immediately try again.
* is not the panic CPU). Regardless of the reason, the caller should assume it
* is not useful to immediately try again.
*
* Requires the console_lock.
*/
@@ -2989,10 +2776,6 @@ static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handove
*handover = false;
do {
/* Let the kthread printers do the work if they can. */
if (!allow_direct_printing())
return false;
any_progress = false;
for_each_console(con) {
@@ -3004,11 +2787,13 @@ static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handove
if (con->flags & CON_EXTENDED) {
/* Extended consoles do not print "dropped messages". */
progress = console_emit_next_record_transferable(con, &text[0],
&ext_text[0], NULL, handover);
progress = console_emit_next_record(con, &text[0],
&ext_text[0], NULL,
handover);
} else {
progress = console_emit_next_record_transferable(con, &text[0],
NULL, &dropped_text[0], handover);
progress = console_emit_next_record(con, &text[0],
NULL, &dropped_text[0],
handover);
}
if (*handover)
return false;
@@ -3123,13 +2908,10 @@ void console_unblank(void)
if (oops_in_progress) {
if (down_trylock_console_sem() != 0)
return;
if (!console_kthreads_atomic_tryblock()) {
up_console_sem();
return;
}
} else
console_lock();
console_locked = 1;
console_may_schedule = 0;
for_each_console(c)
if ((c->flags & CON_ENABLED) && c->unblank)
@@ -3408,10 +3190,6 @@ void register_console(struct console *newcon)
nr_ext_console_drivers++;
newcon->dropped = 0;
newcon->thread = NULL;
newcon->blocked = true;
mutex_init(&newcon->lock);
if (newcon->flags & CON_PRINTBUFFER) {
/* Get a consistent copy of @syslog_seq. */
mutex_lock(&syslog_lock);
@@ -3421,10 +3199,6 @@ void register_console(struct console *newcon)
/* Begin with next message. */
newcon->seq = prb_next_seq(prb);
}
if (printk_kthreads_available)
printk_start_kthread(newcon);
console_unlock();
console_sysfs_notify();
@@ -3451,7 +3225,6 @@ EXPORT_SYMBOL(register_console);
int unregister_console(struct console *console)
{
struct task_struct *thd;
struct console *con;
int res;
@@ -3492,20 +3265,7 @@ int unregister_console(struct console *console)
console_drivers->flags |= CON_CONSDEV;
console->flags &= ~CON_ENABLED;
/*
* console->thread can only be cleared under the console lock. But
* stopping the thread must be done without the console lock. The
* task that clears @thread is the task that stops the kthread.
*/
thd = console->thread;
console->thread = NULL;
console_unlock();
if (thd)
kthread_stop(thd);
console_sysfs_notify();
if (console->exit)
@@ -3601,20 +3361,6 @@ static int __init printk_late_init(void)
}
late_initcall(printk_late_init);
static int __init printk_activate_kthreads(void)
{
struct console *con;
console_lock();
printk_kthreads_available = true;
for_each_console(con)
printk_start_kthread(con);
console_unlock();
return 0;
}
early_initcall(printk_activate_kthreads);
#if defined CONFIG_PRINTK
/* If @con is specified, only wait for that console. Otherwise wait for all. */
static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress)
@@ -3689,209 +3435,11 @@ bool pr_flush(int timeout_ms, bool reset_on_progress)
}
EXPORT_SYMBOL(pr_flush);
static void __printk_fallback_preferred_direct(void)
{
printk_prefer_direct_enter();
pr_err("falling back to preferred direct printing\n");
printk_kthreads_available = false;
}
/*
* Enter preferred direct printing, but never exit. Mark console threads as
* unavailable. The system is then forever in preferred direct printing and
* any printing threads will exit.
*
* Must *not* be called under console_lock. Use
* __printk_fallback_preferred_direct() if already holding console_lock.
*/
static void printk_fallback_preferred_direct(void)
{
console_lock();
__printk_fallback_preferred_direct();
console_unlock();
}
/*
* Print a record for a given console, not allowing another printk() caller
* to take over. This is appropriate for contexts that do not have the
* console_lock.
*
* See __console_emit_next_record() for argument and return details.
*/
static bool console_emit_next_record(struct console *con, char *text, char *ext_text,
char *dropped_text)
{
return __console_emit_next_record(con, text, ext_text, dropped_text, NULL);
}
static bool printer_should_wake(struct console *con, u64 seq)
{
short flags;
if (kthread_should_stop() || !printk_kthreads_available)
return true;
if (con->blocked ||
console_kthreads_atomically_blocked() ||
block_console_kthreads ||
system_state > SYSTEM_RUNNING ||
oops_in_progress) {
return false;
}
/*
* This is an unsafe read from con->flags, but a false positive is
* not a problem. Worst case it would allow the printer to wake up
* although it is disabled. But the printer will notice that when
* attempting to print and instead go back to sleep.
*/
flags = data_race(READ_ONCE(con->flags));
if (!__console_is_usable(flags))
return false;
return prb_read_valid(prb, seq, NULL);
}
static int printk_kthread_func(void *data)
{
struct console *con = data;
char *dropped_text = NULL;
char *ext_text = NULL;
u64 seq = 0;
char *text;
int error;
text = kmalloc(CONSOLE_LOG_MAX, GFP_KERNEL);
if (!text) {
con_printk(KERN_ERR, con, "failed to allocate text buffer\n");
printk_fallback_preferred_direct();
goto out;
}
if (con->flags & CON_EXTENDED) {
ext_text = kmalloc(CONSOLE_EXT_LOG_MAX, GFP_KERNEL);
if (!ext_text) {
con_printk(KERN_ERR, con, "failed to allocate ext_text buffer\n");
printk_fallback_preferred_direct();
goto out;
}
} else {
dropped_text = kmalloc(DROPPED_TEXT_MAX, GFP_KERNEL);
if (!dropped_text) {
con_printk(KERN_ERR, con, "failed to allocate dropped_text buffer\n");
printk_fallback_preferred_direct();
goto out;
}
}
con_printk(KERN_INFO, con, "printing thread started\n");
for (;;) {
/*
* Guarantee this task is visible on the waitqueue before
* checking the wake condition.
*
* The full memory barrier within set_current_state() of
* prepare_to_wait_event() pairs with the full memory barrier
* within wq_has_sleeper().
*
* This pairs with __wake_up_klogd:A.
*/
error = wait_event_interruptible(log_wait,
printer_should_wake(con, seq)); /* LMM(printk_kthread_func:A) */
if (kthread_should_stop() || !printk_kthreads_available)
break;
if (error)
continue;
error = mutex_lock_interruptible(&con->lock);
if (error)
continue;
if (con->blocked ||
!console_kthread_printing_tryenter()) {
/* Another context has locked the console_lock. */
mutex_unlock(&con->lock);
continue;
}
/*
* Although this context has not locked the console_lock, it
* is known that the console_lock is not locked and it is not
* possible for any other context to lock the console_lock.
* Therefore it is safe to read con->flags.
*/
if (!__console_is_usable(con->flags)) {
console_kthread_printing_exit();
mutex_unlock(&con->lock);
continue;
}
/*
* Even though the printk kthread is always preemptible, it is
* still not allowed to call cond_resched() from within
* console drivers. The task may become non-preemptible in the
* console driver call chain. For example, vt_console_print()
* takes a spinlock and then can call into fbcon_redraw(),
* which can conditionally invoke cond_resched().
*/
console_may_schedule = 0;
console_emit_next_record(con, text, ext_text, dropped_text);
seq = con->seq;
console_kthread_printing_exit();
mutex_unlock(&con->lock);
}
con_printk(KERN_INFO, con, "printing thread stopped\n");
out:
kfree(dropped_text);
kfree(ext_text);
kfree(text);
console_lock();
/*
* If this kthread is being stopped by another task, con->thread will
* already be NULL. That is fine. The important thing is that it is
* NULL after the kthread exits.
*/
con->thread = NULL;
console_unlock();
return 0;
}
/* Must be called under console_lock. */
static void printk_start_kthread(struct console *con)
{
/*
* Do not start a kthread if there is no write() callback. The
* kthreads assume the write() callback exists.
*/
if (!con->write)
return;
con->thread = kthread_run(printk_kthread_func, con,
"pr/%s%d", con->name, con->index);
if (IS_ERR(con->thread)) {
con->thread = NULL;
con_printk(KERN_ERR, con, "unable to start printing thread\n");
__printk_fallback_preferred_direct();
return;
}
}
/*
* Delayed printk version, for scheduler-internal messages:
*/
#define PRINTK_PENDING_WAKEUP 0x01
#define PRINTK_PENDING_DIRECT_OUTPUT 0x02
#define PRINTK_PENDING_WAKEUP 0x01
#define PRINTK_PENDING_OUTPUT 0x02
static DEFINE_PER_CPU(int, printk_pending);
@@ -3899,14 +3447,10 @@ static void wake_up_klogd_work_func(struct irq_work *irq_work)
{
int pending = this_cpu_xchg(printk_pending, 0);
if (pending & PRINTK_PENDING_DIRECT_OUTPUT) {
printk_prefer_direct_enter();
if (pending & PRINTK_PENDING_OUTPUT) {
/* If trylock fails, someone else is doing the printing */
if (console_trylock())
console_unlock();
printk_prefer_direct_exit();
}
if (pending & PRINTK_PENDING_WAKEUP)
@@ -3931,11 +3475,10 @@ static void __wake_up_klogd(int val)
* prepare_to_wait_event(), which is called after ___wait_event() adds
* the waiter but before it has checked the wait condition.
*
* This pairs with devkmsg_read:A, syslog_print:A, and
* printk_kthread_func:A.
* This pairs with devkmsg_read:A and syslog_print:A.
*/
if (wq_has_sleeper(&log_wait) || /* LMM(__wake_up_klogd:A) */
(val & PRINTK_PENDING_DIRECT_OUTPUT)) {
(val & PRINTK_PENDING_OUTPUT)) {
this_cpu_or(printk_pending, val);
irq_work_queue(this_cpu_ptr(&wake_up_klogd_work));
}
@@ -3953,17 +3496,7 @@ void defer_console_output(void)
* New messages may have been added directly to the ringbuffer
* using vprintk_store(), so wake any waiters as well.
*/
int val = PRINTK_PENDING_WAKEUP;
/*
* Make sure that some context will print the messages when direct
* printing is allowed. This happens in situations when the kthreads
* may not be as reliable or perhaps unusable.
*/
if (allow_direct_printing())
val |= PRINTK_PENDING_DIRECT_OUTPUT;
__wake_up_klogd(val);
__wake_up_klogd(PRINTK_PENDING_WAKEUP | PRINTK_PENDING_OUTPUT);
}
void printk_trigger_flush(void)

diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c

@@ -8,9 +8,7 @@
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/printk.h>
#include <linux/console.h>
#include <linux/kprobes.h>
#include <linux/delay.h>
#include "internal.h"
@@ -52,33 +50,3 @@ asmlinkage int vprintk(const char *fmt, va_list args)
return vprintk_default(fmt, args);
}
EXPORT_SYMBOL(vprintk);
/**
* try_block_console_kthreads() - Try to block console kthreads and
* make the global console_lock() available
*
* @timeout_ms: The maximum time (in ms) to wait.
*
* Prevent console kthreads from starting to process new messages. Wait
* until the global console_lock() becomes available.
*
* Context: Can be called in any context.
*/
void try_block_console_kthreads(int timeout_ms)
{
block_console_kthreads = true;
/* Do not wait when the console lock could not be safely taken. */
if (this_cpu_read(printk_context) || in_nmi())
return;
while (timeout_ms > 0) {
if (console_trylock()) {
console_unlock();
return;
}
udelay(1000);
timeout_ms -= 1;
}
}

diff --git a/kernel/rcu/tree_stall.h b/kernel/rcu/tree_stall.h

@@ -647,7 +647,6 @@ static void print_cpu_stall(unsigned long gps)
* See Documentation/RCU/stallwarn.rst for info on how to debug
* RCU CPU stall warnings.
*/
printk_prefer_direct_enter();
trace_rcu_stall_warning(rcu_state.name, TPS("SelfDetected"));
pr_err("INFO: %s self-detected stall on CPU\n", rcu_state.name);
raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags);
@@ -685,7 +684,6 @@ static void print_cpu_stall(unsigned long gps)
*/
set_tsk_need_resched(current);
set_preempt_need_resched();
printk_prefer_direct_exit();
}
static void check_cpu_stall(struct rcu_data *rdp)

diff --git a/kernel/reboot.c b/kernel/reboot.c

@@ -82,7 +82,6 @@ void kernel_restart_prepare(char *cmd)
{
blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
system_state = SYSTEM_RESTART;
try_block_console_kthreads(10000);
usermodehelper_disable();
device_shutdown();
}
@@ -271,7 +270,6 @@ static void kernel_shutdown_prepare(enum system_states state)
blocking_notifier_call_chain(&reboot_notifier_list,
(state == SYSTEM_HALT) ? SYS_HALT : SYS_POWER_OFF, NULL);
system_state = state;
try_block_console_kthreads(10000);
usermodehelper_disable();
device_shutdown();
}
@@ -821,11 +819,9 @@ static int __orderly_reboot(void)
ret = run_cmd(reboot_cmd);
if (ret) {
printk_prefer_direct_enter();
pr_warn("Failed to start orderly reboot: forcing the issue\n");
emergency_sync();
kernel_restart(NULL);
printk_prefer_direct_exit();
}
return ret;
@@ -838,7 +834,6 @@ static int __orderly_poweroff(bool force)
ret = run_cmd(poweroff_cmd);
if (ret && force) {
printk_prefer_direct_enter();
pr_warn("Failed to start orderly shutdown: forcing the issue\n");
/*
@@ -848,7 +843,6 @@
*/
emergency_sync();
kernel_power_off();
printk_prefer_direct_exit();
}
return ret;
@@ -906,8 +900,6 @@ EXPORT_SYMBOL_GPL(orderly_reboot);
*/
static void hw_failure_emergency_poweroff_func(struct work_struct *work)
{
printk_prefer_direct_enter();
/*
* We have reached here after the emergency shutdown waiting period has
* expired. This means orderly_poweroff has not been able to shut off
@@ -924,8 +916,6 @@ static void hw_failure_emergency_poweroff_func(struct work_struct *work)
*/
pr_emerg("Hardware protection shutdown failed. Trying emergency restart\n");
emergency_restart();
printk_prefer_direct_exit();
}
static DECLARE_DELAYED_WORK(hw_failure_emergency_poweroff_work,
@@ -964,13 +954,11 @@ void hw_protection_shutdown(const char *reason, int ms_until_forced)
{
static atomic_t allow_proceed = ATOMIC_INIT(1);
printk_prefer_direct_enter();
pr_emerg("HARDWARE PROTECTION shutdown (%s)\n", reason);
/* Shutdown should be initiated only once. */
if (!atomic_dec_and_test(&allow_proceed))
goto out;
return;
/*
* Queue a backup emergency shutdown in the event of
@@ -978,8 +966,6 @@ void hw_protection_shutdown(const char *reason, int ms_until_forced)
*/
hw_failure_emergency_poweroff(ms_until_forced);
orderly_poweroff(true);
out:
printk_prefer_direct_exit();
}
EXPORT_SYMBOL_GPL(hw_protection_shutdown);

diff --git a/kernel/watchdog.c b/kernel/watchdog.c

@@ -424,8 +424,6 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
/* Start period for the next softlockup warning. */
update_report_ts();
printk_prefer_direct_enter();
pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
smp_processor_id(), duration,
current->comm, task_pid_nr(current));
@@ -444,8 +442,6 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
if (softlockup_panic)
panic("softlockup: hung tasks");
printk_prefer_direct_exit();
}
return HRTIMER_RESTART;

diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c

@@ -135,8 +135,6 @@ static void watchdog_overflow_callback(struct perf_event *event,
if (__this_cpu_read(hard_watchdog_warn) == true)
return;
printk_prefer_direct_enter();
pr_emerg("Watchdog detected hard LOCKUP on cpu %d\n",
this_cpu);
print_modules();
@@ -157,8 +155,6 @@ static void watchdog_overflow_callback(struct perf_event *event,
if (hardlockup_panic)
nmi_panic(regs, "Hard LOCKUP");
printk_prefer_direct_exit();
__this_cpu_write(hard_watchdog_warn, true);
return;
}